diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 6103dca03..e2bb19bdb 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -40,6 +40,7 @@ jobs:
           kind load docker-image --name=kind quay.io/open-cluster-management/registration:e2e
           kind load docker-image --name=kind quay.io/open-cluster-management/work:e2e
           kind load docker-image --name=kind quay.io/open-cluster-management/placement:e2e
+          kind load docker-image --name=kind quay.io/open-cluster-management/addon-manager:e2e
       - name: Test E2E
         run: |
           IMAGE_TAG=e2e make test-e2e
diff --git a/.github/workflows/post.yml b/.github/workflows/post.yml
index 214e48b13..a6726c36a 100644
--- a/.github/workflows/post.yml
+++ b/.github/workflows/post.yml
@@ -68,6 +68,7 @@ jobs:
           docker push quay.io/open-cluster-management/registration:latest-${{ matrix.arch }}
           docker push quay.io/open-cluster-management/work:latest-${{ matrix.arch }}
           docker push quay.io/open-cluster-management/placement:latest-${{ matrix.arch }}
+          docker push quay.io/open-cluster-management/addon-manager:latest-${{ matrix.arch }}
   image-manifest:
     name: image manifest
     runs-on: ubuntu-latest
@@ -94,6 +95,10 @@ jobs:
           docker manifest create quay.io/open-cluster-management/placement:latest \
             quay.io/open-cluster-management/placement:latest-amd64 \
             quay.io/open-cluster-management/placement:latest-arm64
+          # addon-manager
+          docker manifest create quay.io/open-cluster-management/addon-manager:latest \
+            quay.io/open-cluster-management/addon-manager:latest-amd64 \
+            quay.io/open-cluster-management/addon-manager:latest-arm64
       - name: annotate
         run: |
           # registration-operator
@@ -116,9 +121,15 @@ jobs:
           docker manifest annotate quay.io/open-cluster-management/placement:latest \
             quay.io/open-cluster-management/placement:latest-amd64 --arch amd64
           docker manifest annotate quay.io/open-cluster-management/placement:latest \
             quay.io/open-cluster-management/placement:latest-arm64 --arch arm64
+          # addon-manager
+          docker manifest annotate quay.io/open-cluster-management/addon-manager:latest \
+            quay.io/open-cluster-management/addon-manager:latest-amd64 --arch amd64
+          docker manifest annotate quay.io/open-cluster-management/addon-manager:latest \
+            quay.io/open-cluster-management/addon-manager:latest-arm64 --arch arm64
       - name: push
         run: |
           docker manifest push quay.io/open-cluster-management/registration-operator:latest
           docker manifest push quay.io/open-cluster-management/registration:latest
           docker manifest push quay.io/open-cluster-management/work:latest
           docker manifest push quay.io/open-cluster-management/placement:latest
+          docker manifest push quay.io/open-cluster-management/addon-manager:latest
diff --git a/.gitignore b/.gitignore
index cbaf3221f..df4ba9c2b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 /registration
 /work
 /registration-operator
+/addon
 *.exe
 *.dll
diff --git a/Makefile b/Makefile
index 87e7681b1..8f2f31f59 100644
--- a/Makefile
+++ b/Makefile
@@ -40,12 +40,13 @@ REGISTRATION_IMAGE ?= $(IMAGE_REGISTRY)/registration:$(IMAGE_TAG)
 # PLACEMENT_IMAGE can be set in the env to override calculated value
 PLACEMENT_IMAGE ?= $(IMAGE_REGISTRY)/placement:$(IMAGE_TAG)
 # ADDON_MANAGER_IMAGE can be set in the env to override calculated value
-ADDON_MANAGER_IMAGE ?= $(IMAGE_REGISTRY)/addon-manager:latest
+ADDON_MANAGER_IMAGE ?= $(IMAGE_REGISTRY)/addon-manager:$(IMAGE_TAG)

 $(call build-image,registration,$(REGISTRATION_IMAGE),./build/Dockerfile.registration,.)
 $(call build-image,work,$(WORK_IMAGE),./build/Dockerfile.work,.)
 $(call build-image,placement,$(PLACEMENT_IMAGE),./build/Dockerfile.placement,.)
 $(call build-image,registration-operator,$(OPERATOR_IMAGE_NAME),./build/Dockerfile.registration-operator,.)
+$(call build-image,addon-manager,$(ADDON_MANAGER_IMAGE),./build/Dockerfile.addon,.)

 copy-crd:
 	bash -x hack/copy-crds.sh
diff --git a/build/Dockerfile.addon b/build/Dockerfile.addon
new file mode 100644
index 000000000..b8799492b
--- /dev/null
+++ b/build/Dockerfile.addon
@@ -0,0 +1,17 @@
+FROM golang:1.20-bullseye AS builder
+ARG OS=linux
+ARG ARCH=amd64
+WORKDIR /go/src/open-cluster-management.io/ocm
+COPY . .
+ENV GO_PACKAGE open-cluster-management.io/ocm
+
+RUN GOOS=${OS} \
+    GOARCH=${ARCH} \
+    GO_BUILD_PACKAGES=./cmd/addon \
+    make build --warn-undefined-variables
+
+FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
+ENV USER_UID=10001
+COPY --from=builder /go/src/open-cluster-management.io/ocm/addon /
+
+USER ${USER_UID}
diff --git a/cmd/addon/main.go b/cmd/addon/main.go
new file mode 100644
index 000000000..0e5ea4803
--- /dev/null
+++ b/cmd/addon/main.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+	goflag "flag"
+	"fmt"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+	utilflag "k8s.io/component-base/cli/flag"
+	"k8s.io/component-base/logs"
+
+	"open-cluster-management.io/ocm/pkg/cmd/hub"
+	"open-cluster-management.io/ocm/pkg/version"
+)
+
+func main() {
+	rand.Seed(time.Now().UTC().UnixNano())
+
+	pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
+	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+
+	logs.AddFlags(pflag.CommandLine)
+	logs.InitLogs()
+	defer logs.FlushLogs()
+
+	command := newAddonCommand()
+	if err := command.Execute(); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+func newAddonCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "addon",
+		Short: "Manager of Addon",
+		Run: func(cmd *cobra.Command, args []string) {
+			_ = cmd.Help()
+			os.Exit(1)
+		},
+	}
+
+	if v := version.Get().String(); len(v) == 0 {
+		cmd.Version = ""
+	} else {
+		cmd.Version = v
+	}
+
+	cmd.AddCommand(hub.NewAddonManager())
+	return cmd
+}
diff --git a/go.mod b/go.mod
index af5244e66..ff36485eb 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.20

 require (
 	github.com/davecgh/go-spew v1.1.1
-	github.com/evanphx/json-patch v4.12.0+incompatible
+	github.com/evanphx/json-patch v5.6.0+incompatible
 	github.com/google/go-cmp v0.5.9
 	github.com/onsi/ginkgo/v2 v2.9.5
 	github.com/onsi/gomega v1.27.7
@@ -15,6 +15,7 @@
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.2
+	github.com/valyala/fasttemplate v1.2.2
 	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
 	golang.org/x/net v0.10.0
 	k8s.io/api v0.27.2
@@ -26,6 +27,7 @@
 	k8s.io/klog/v2 v2.90.1
 	k8s.io/kube-aggregator v0.27.2
 	k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
+	open-cluster-management.io/addon-framework v0.7.1-0.20230626092851-963716af4eed
 	open-cluster-management.io/api v0.11.1-0.20230609103311-088e8fe86139
 	sigs.k8s.io/controller-runtime v0.15.0
 	sigs.k8s.io/kube-storage-version-migrator v0.0.5
 )

 require (
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
+	github.com/BurntSushi/toml v1.2.1 // indirect
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver/v3 v3.2.0 // indirect
+	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
-	github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a //
indirect + github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.4.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect @@ -54,6 +62,7 @@ require ( github.com/go-openapi/jsonreference v0.20.1 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -64,13 +73,16 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -82,8 +94,14 @@ require ( github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/robfig/cron v1.2.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect + github.com/spf13/cast v1.4.1 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.7 // indirect @@ -100,7 +118,7 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.1.0 // indirect + golang.org/x/crypto v0.5.0 // indirect golang.org/x/oauth2 v0.5.0 // indirect golang.org/x/sync v0.2.0 // indirect golang.org/x/sys v0.8.0 // indirect @@ -117,6 +135,7 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + helm.sh/helm/v3 v3.11.1 // indirect k8s.io/kms v0.27.2 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect diff --git 
a/go.sum b/go.sum index 64f26d5da..8a14e3076 100644 --- a/go.sum +++ b/go.sum @@ -51,9 +51,16 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -77,8 +84,9 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -131,6 +139,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -158,11 +168,13 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -246,6 +258,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -373,10 +387,13 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -434,6 +451,9 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -443,6 +463,9 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -458,11 +481,13 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -530,6 +555,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -544,6 +571,9 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -585,7 +615,17 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -594,6 +634,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -667,8 +708,10 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -703,6 +746,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -745,7 +789,10 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -766,6 +813,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -818,13 +866,19 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -835,6 +889,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -900,6 +956,7 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1019,6 +1076,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXL gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1038,6 +1096,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I= +helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1096,6 +1156,8 @@ k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +open-cluster-management.io/addon-framework v0.7.1-0.20230626092851-963716af4eed h1:fOOKf8kzVNizc5fYvMwkPy9TT/vOpojd4IIxpzh/vhw= +open-cluster-management.io/addon-framework 
v0.7.1-0.20230626092851-963716af4eed/go.mod h1:Cyt5knxR+sXaKvOfUKseZDAGulS2AJz6o7a9J0WXbak=
open-cluster-management.io/api v0.11.1-0.20230609103311-088e8fe86139 h1:nw/XSv4eDGqmg0ks2PHzrE2uosvjw+D314843G56xGY=
open-cluster-management.io/api v0.11.1-0.20230609103311-088e8fe86139/go.mod h1:WgKUCJ7+Bf40DsOmH1Gdkpyj3joco+QLzrlM6Ak39zE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml b/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml
index 17626d385..f0239cede 100644
--- a/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml
+++ b/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml
@@ -43,7 +43,7 @@ spec:
       - name: addon-manager-controller
         image: {{ .AddOnManagerImage }}
         args:
-          - "/addon-manager"
+          - "/addon"
          - "manager"
          {{ if .HostedMode }}
          - "--kubeconfig=/var/run/secrets/hub/kubeconfig"
diff --git a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go
new file mode 100644
index 000000000..d4285fb58
--- /dev/null
+++ b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go
@@ -0,0 +1,114 @@
+package addonconfiguration
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+)
+
+type managedClusterAddonConfigurationReconciler struct {
+	addonClient addonv1alpha1client.Interface
+}
+
+func (d *managedClusterAddonConfigurationReconciler) reconcile(
+	ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) {
+	var errs []error
+
+	for _, addon := range graph.addonToUpdate() {
+		mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs)
+		err := d.patchAddonStatus(ctx, mca, addon.mca)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return cma, reconcileContinue, utilerrors.NewAggregate(errs)
+}
+
+func (d *managedClusterAddonConfigurationReconciler) mergeAddonConfig(
+	mca *addonv1alpha1.ManagedClusterAddOn, desiredConfigMap addonConfigMap) *addonv1alpha1.ManagedClusterAddOn {
+	mcaCopy := mca.DeepCopy()
+
+	var mergedConfigs []addonv1alpha1.ConfigReference
+	// remove configs that are not desired
+	for _, config := range mcaCopy.Status.ConfigReferences {
+		if _, ok := desiredConfigMap[config.ConfigGroupResource]; ok {
+			mergedConfigs = append(mergedConfigs, config)
+		}
+	}
+
+	// append or update configs
+	for _, config := range desiredConfigMap {
+		var match bool
+		for i := range mergedConfigs {
+			if mergedConfigs[i].ConfigGroupResource != config.ConfigGroupResource {
+				continue
+			}
+
+			match = true
+			// set LastObservedGeneration to 0 when config name/namespace changes
+			if mergedConfigs[i].DesiredConfig != nil && (mergedConfigs[i].DesiredConfig.ConfigReferent != config.DesiredConfig.ConfigReferent) {
+				mergedConfigs[i].LastObservedGeneration = 0
+			}
+			mergedConfigs[i].ConfigReferent = config.ConfigReferent
+			mergedConfigs[i].DesiredConfig = config.DesiredConfig.DeepCopy()
+		}
+
+		if !match {
+			mergedConfigs = append(mergedConfigs, config)
+		}
+	}
+
+	mcaCopy.Status.ConfigReferences = mergedConfigs
+	return mcaCopy
+}
+
+func (d *managedClusterAddonConfigurationReconciler) patchAddonStatus(ctx context.Context, new, old *addonv1alpha1.ManagedClusterAddOn) error {
+	if equality.Semantic.DeepEqual(new.Status, old.Status) {
+		return nil
+	}
+
+	oldData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{
+		Status: addonv1alpha1.ManagedClusterAddOnStatus{
+			Namespace:        old.Status.Namespace,
+			ConfigReferences: old.Status.ConfigReferences,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	newData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:             new.UID,
+			ResourceVersion: new.ResourceVersion,
+		},
+		Status: addonv1alpha1.ManagedClusterAddOnStatus{
+			Namespace:        new.Status.Namespace,
+			ConfigReferences: new.Status.ConfigReferences,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
+	if err != nil {
+		return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err)
+	}
+
+	klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes))
+	_, err = d.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(
+		ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
diff --git a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go
new file mode 100644
index 000000000..737a3ec19
--- /dev/null
+++ b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go
@@ -0,0 +1,630 @@
+package addonconfiguration
+
+import (
+	"context"
+	"encoding/json"
+	"sort"
+	"testing"
+	"time"
+
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	clienttesting "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/cache"
+
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting"
+	"open-cluster-management.io/addon-framework/pkg/index"
+	"open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake"
+	addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions"
+	fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
+	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
+)
+
+func TestAddonConfigReconcile(t *testing.T) {
+	cases := []struct {
+		name                   string
+		managedClusteraddon    []runtime.Object
+		clusterManagementAddon *addonv1alpha1.ClusterManagementAddOn
+		placements             []runtime.Object
+		placementDecisions     []runtime.Object
+		validateAddonActions   func(t *testing.T, actions []clienttesting.Action)
+		expectErr              bool
+	}{
+		{
+			name: "no configuration",
+			managedClusteraddon: []runtime.Object{
+				addontesting.NewAddon("test", "cluster1"),
+				addontesting.NewAddon("test", "cluster2"),
+			},
+			clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").Build(),
+			placements:             []runtime.Object{},
+			placementDecisions:     []runtime.Object{},
+			validateAddonActions:   addontesting.AssertNoActions,
+		},
+		{
+			name: "manual
installStrategy", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).WithDefaultConfigReferences(addonv1alpha1.DefaultConfigReference{ + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "hash", + }, + }).Build(), + placements: []runtime.Object{}, + placementDecisions: []runtime.Object{}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch", "patch") + sort.Sort(byPatchName(actions)) + expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "hash", + }, + LastObservedGeneration: 0, + }}) + }, + }, + { + name: "placement installStrategy", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + }, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}}, + }, + }, + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).WithDefaultConfigReferences(addonv1alpha1.DefaultConfigReference{ + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "hash", + }, + }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + }).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build(), + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch", "patch") + sort.Sort(byPatchName(actions)) + expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: 
addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "hash", + }, + LastObservedGeneration: 0, + }}) + expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastObservedGeneration: 0, + }}) + }, + }, + { + name: "mca override", + managedClusteraddon: []runtime.Object{ + newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + }}, nil), + addontesting.NewAddon("test", "cluster2"), + }, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}, + }, + }, + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).WithDefaultConfigReferences(addonv1alpha1.DefaultConfigReference{ + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "hash", + }, + }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + }).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build(), + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch", "patch") + sort.Sort(byPatchName(actions)) + expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + LastObservedGeneration: 0, + }}) + expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: 
addonv1alpha1.ConfigReferent{Name: "test1"},
+					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
+						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
+						SpecHash:       "hash1",
+					},
+					LastObservedGeneration: 0,
+				}})
+			},
+		},
+		{
+			name: "config name/namespace change",
+			managedClusteraddon: []runtime.Object{
+				newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{{
+					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
+					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
+						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
+						SpecHash:       "hash1",
+					},
+					LastObservedGeneration: 1,
+				}}),
+			},
+			placements: []runtime.Object{
+				&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
+			},
+			placementDecisions: []runtime.Object{
+				&clusterv1beta1.PlacementDecision{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-placement",
+						Namespace: "default",
+						Labels:    map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
+					},
+					Status: clusterv1beta1.PlacementDecisionStatus{
+						Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
+					},
+				},
+			},
+			clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(addonv1alpha1.ConfigMeta{
+				ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+				DefaultConfig:       &addonv1alpha1.ConfigReferent{Name: "test"},
+			}).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
+				PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
+			}).WithInstallProgression(addonv1alpha1.InstallProgression{
+				PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
+				ConfigReferences: []addonv1alpha1.InstallConfigReference{
+					{
+						ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+						DesiredConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test2"},
+							SpecHash:       "hash2",
+						},
+					},
+				},
+			}).Build(),
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "patch")
+				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
+					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test2"},
+					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
+						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"},
+						SpecHash:       "hash2",
+					},
+					LastObservedGeneration: 0,
+				}})
+			},
+		},
+		{
+			name: "config spec hash change",
+			managedClusteraddon: []runtime.Object{
+				newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{{
+					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
+					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
+						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
+						SpecHash:       "hash1",
+					},
+					LastObservedGeneration: 1,
+				}}),
+			},
+			placements: []runtime.Object{
+				&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
+			},
+			placementDecisions: []runtime.Object{
+				&clusterv1beta1.PlacementDecision{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-placement",
+						Namespace: "default",
+						Labels:
map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}, + }, + }, + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + Configs: []addonv1alpha1.AddOnConfig{v1alpha1.AddOnConfig{ + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}}}, + }).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1new", + }, + }, + }, + }).Build(), + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1new", + }, + LastObservedGeneration: 1, + }}) + }, + }, + { + name: "mca noop", + managedClusteraddon: []runtime.Object{ + newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastObservedGeneration: 1, + }}), + }, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}, + }, + }, + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + Configs: []addonv1alpha1.AddOnConfig{v1alpha1.AddOnConfig{ + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}}}, + 
}).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build(), + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "placement rolling update with MaxConcurrency 1", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + addontesting.NewAddon("test", "cluster3"), + }, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}}, + }, + }, + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: addonv1alpha1.RolloutStrategy{ + Type: addonv1alpha1.AddonRolloutStrategyRollingUpdate, + RollingUpdate: &addonv1alpha1.RollingUpdate{MaxConcurrency: intstr.FromInt(1)}}, + }).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build(), + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + sort.Sort(byPatchName(actions)) + expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastObservedGeneration: 0, + }}) + }, + }, + { + name: "placement rolling update with MaxConcurrency 0", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + addontesting.NewAddon("test", "cluster3"), + }, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}}, + }, + }, + }, + 
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: addonv1alpha1.RolloutStrategy{ + Type: addonv1alpha1.AddonRolloutStrategyRollingUpdate, + RollingUpdate: &addonv1alpha1.RollingUpdate{MaxConcurrency: intstr.FromString("0%")}}, + }).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build(), + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "placement rolling update with default MaxConcurrency", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + addontesting.NewAddon("test", "cluster3"), + }, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}}, + }, + }, + }, + clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: addonv1alpha1.RolloutStrategy{ + Type: addonv1alpha1.AddonRolloutStrategyRollingUpdate, + RollingUpdate: &addonv1alpha1.RollingUpdate{MaxConcurrency: defaultMaxConcurrency}}, + }).WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build(), + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + sort.Sort(byPatchName(actions)) + expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastObservedGeneration: 0, + }}) + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + clusterObj := append(c.placements, c.placementDecisions...) + fakeClusterClient := fakecluster.NewSimpleClientset(clusterObj...) + fakeAddonClient := fakeaddon.NewSimpleClientset(c.managedClusteraddon...) 
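// Note on the fixture pattern: the client-go fakes seed an in-memory object
// tracker with the listed objects and record every subsequent write as a
// clienttesting.Action; each validateAddonActions callback asserts against
// that recorded list, retrieved below via fakeAddonClient.Actions().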
+ + addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute) + clusterInformers := clusterv1informers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute) + + err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ManagedClusterAddonByName: index.IndexManagedClusterAddonByName, + }) + if err != nil { + t.Fatal(err) + } + + for _, obj := range c.placements { + if err := clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + for _, obj := range c.placementDecisions { + if err := clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + for _, obj := range c.managedClusteraddon { + if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + controller := &addonConfigurationController{ + addonClient: fakeAddonClient, + placementDecisionLister: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister(), + placementLister: clusterInformers.Cluster().V1beta1().Placements().Lister(), + clusterManagementAddonLister: addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Lister(), + managedClusterAddonIndexer: addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetIndexer(), + } + + reconcile := &managedClusterAddonConfigurationReconciler{ + addonClient: fakeAddonClient, + } + + graph, err := controller.buildConfigurationGraph(c.clusterManagementAddon) + if err != nil { + t.Errorf("expected no error when building graph: %v", err) + } + + _, _, err = reconcile.reconcile(context.TODO(), c.clusterManagementAddon, graph) + if err != nil && !c.expectErr { + t.Errorf("expected no error when syncing: %v", err) + } + if err == nil && c.expectErr { + t.Errorf("expected error but got no error") + } + + c.validateAddonActions(t, fakeAddonClient.Actions()) + }) + } +} + +// byPatchName implements sort.Interface for []clienttesting.Action, ordering patch actions by namespace so that assertions are deterministic.
+type byPatchName []clienttesting.Action + +func (a byPatchName) Len() int { return len(a) } +func (a byPatchName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPatchName) Less(i, j int) bool { + patchi := a[i].(clienttesting.PatchActionImpl) + patchj := a[j].(clienttesting.PatchActionImpl) + return patchi.Namespace < patchj.Namespace +} + +func newManagedClusterAddon(name, namespace string, configs []addonv1alpha1.AddOnConfig, configStatus []addonv1alpha1.ConfigReference) *addonv1alpha1.ManagedClusterAddOn { + mca := addontesting.NewAddon(name, namespace) + mca.Spec.Configs = configs + mca.Status.ConfigReferences = configStatus + return mca +} + +func expectPatchConfigurationAction(t *testing.T, action clienttesting.Action, expected []addonv1alpha1.ConfigReference) { + patch := action.(clienttesting.PatchActionImpl).GetPatch() + mca := &addonv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(patch, mca) + if err != nil { + t.Fatal(err) + } + + if !apiequality.Semantic.DeepEqual(mca.Status.ConfigReferences, expected) { + t.Errorf("Configuration not correctly patched, expected %v, actual %v", expected, mca.Status.ConfigReferences) + } +} diff --git a/pkg/addon/controllers/addonconfiguration/controller.go b/pkg/addon/controllers/addonconfiguration/controller.go new file mode 100644 index 000000000..c67499140 --- /dev/null +++ b/pkg/addon/controllers/addonconfiguration/controller.go @@ -0,0 +1,212 @@ +package addonconfiguration + +import ( + "context" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + "open-cluster-management.io/addon-framework/pkg/index" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// addonConfigurationController is a controller to update configuration of mca with the following order +// 1. use configuration in mca spec if it is set +// 2. use configuration in install strategy +// 3. 
use configuration in the default configuration in cma +type addonConfigurationController struct { + addonClient addonv1alpha1client.Interface + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + managedClusterAddonIndexer cache.Indexer + addonFilterFunc factory.EventFilterFunc + placementLister clusterlisterv1beta1.PlacementLister + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister + + reconcilers []addonConfigurationReconcile +} + +type addonConfigurationReconcile interface { + reconcile(ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, + graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) +} + +type reconcileState int64 + +const ( + reconcileStop reconcileState = iota + reconcileContinue +) + +func NewAddonConfigurationController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + placementInformer clusterinformersv1beta1.PlacementInformer, + placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer, + addonFilterFunc factory.EventFilterFunc, + recorder events.Recorder, +) factory.Controller { + c := &addonConfigurationController{ + addonClient: addonClient, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + managedClusterAddonIndexer: addonInformers.Informer().GetIndexer(), + addonFilterFunc: addonFilterFunc, + } + + c.reconcilers = []addonConfigurationReconcile{ + &managedClusterAddonConfigurationReconciler{ + addonClient: addonClient, + }, + &clusterManagementAddonProgressingReconciler{ + addonClient: addonClient, + }, + } + + controllerFactory := factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, + clusterManagementAddonInformers.Informer()).WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()) + + // This is to handle the case the self managed addon-manager does not have placementInformer/placementDecisionInformer. + // we will not consider installStrategy related placement for self managed addon-manager. + if placementInformer != nil && placementDecisionInformer != nil { + controllerFactory = controllerFactory.WithInformersQueueKeysFunc( + index.ClusterManagementAddonByPlacementDecisionQueueKey(clusterManagementAddonInformers), placementDecisionInformer.Informer()). 
+ WithInformersQueueKeysFunc(index.ClusterManagementAddonByPlacementQueueKey(clusterManagementAddonInformers), placementInformer.Informer()) + c.placementLister = placementInformer.Lister() + c.placementDecisionLister = placementDecisionInformer.Lister() + } + + return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller", recorder) +} + +func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext) error { + key := syncCtx.QueueKey() + _, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + klog.V(4).Infof("Reconciling addon %q", addonName) + + cma, err := c.clusterManagementAddonLister.Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + if !c.addonFilterFunc(cma) { + return nil + } + + cma = cma.DeepCopy() + graph, err := c.buildConfigurationGraph(cma) + if err != nil { + return err + } + + var state reconcileState + var errs []error + for _, reconciler := range c.reconcilers { + cma, state, err = reconciler.reconcile(ctx, cma, graph) + if err != nil { + errs = append(errs, err) + } + if state == reconcileStop { + break + } + } + + return utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) buildConfigurationGraph(cma *addonv1alpha1.ClusterManagementAddOn) (*configurationGraph, error) { + graph := newGraph(cma.Spec.SupportedConfigs, cma.Status.DefaultConfigReferences) + addons, err := c.managedClusterAddonIndexer.ByIndex(index.ManagedClusterAddonByName, cma.Name) + if err != nil { + return graph, err + } + + // add all existing addons to the default at first + for _, addonObject := range addons { + addon := addonObject.(*addonv1alpha1.ManagedClusterAddOn) + graph.addAddonNode(addon) + } + + if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual { + return graph, nil + } + + // check each install strategy in status + var errs []error + for _, installProgression := range cma.Status.InstallProgressions { + clusters, err := c.getClustersByPlacement(installProgression.PlacementRef.Name, installProgression.PlacementRef.Namespace) + if errors.IsNotFound(err) { + klog.V(2).Infof("placement %s/%s is not found for addon %s", installProgression.PlacementRef.Namespace, installProgression.PlacementRef.Name, cma.Name) + continue + } + if err != nil { + errs = append(errs, err) + continue + } + + for _, installStrategy := range cma.Spec.InstallStrategy.Placements { + if installStrategy.PlacementRef == installProgression.PlacementRef { + graph.addPlacementNode(installStrategy, installProgression, clusters) + + } + } + } + + return graph, utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) getClustersByPlacement(name, namespace string) ([]string, error) { + var clusters []string + if c.placementLister == nil || c.placementDecisionLister == nil { + return clusters, nil + } + _, err := c.placementLister.Placements(namespace).Get(name) + if err != nil { + return clusters, err + } + + decisionSelector := labels.SelectorFromSet(labels.Set{ + clusterv1beta1.PlacementLabel: name, + }) + decisions, err := c.placementDecisionLister.PlacementDecisions(namespace).List(decisionSelector) + if err != nil { + return clusters, err + } + + for _, d := range decisions { + for _, sd := range d.Status.Decisions { + clusters = append(clusters, sd.ClusterName) + } + } + + return clusters, nil +} diff --git 
a/pkg/addon/controllers/addonconfiguration/graph.go b/pkg/addon/controllers/addonconfiguration/graph.go new file mode 100644 index 000000000..789e15d74 --- /dev/null +++ b/pkg/addon/controllers/addonconfiguration/graph.go @@ -0,0 +1,343 @@ +package addonconfiguration + +import ( + "fmt" + "math" + "sort" + "strconv" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +var ( + defaultMaxConcurrency = intstr.FromString("25%") + maxMaxConcurrency = intstr.FromString("100%") +) + +// configurationGraph is a 2-level snapshot tree of addon configuration: +// the first level is a list of nodes, each representing an install strategy and the desired configuration for that +// strategy. The second level is a list of nodes representing each mca and its desired configuration. +type configurationGraph struct { + // nodes maintains the mapping between an installStrategy and its related mcas + nodes []*installStrategyNode + // defaults is the node for addons with no install strategy + defaults *installStrategyNode +} + +// installStrategyNode is a node in configurationGraph defined by an install strategy +type installStrategyNode struct { + placementRef addonv1alpha1.PlacementRef + maxConcurrency intstr.IntOrString + desiredConfigs addonConfigMap + // children keeps a map of addon nodes that are the children of this node + children map[string]*addonNode + clusters sets.Set[string] +} + +// addonNode is a node that is a child of an installStrategy node, representing one mca +type addonNode struct { + desiredConfigs addonConfigMap + mca *addonv1alpha1.ManagedClusterAddOn + // record mca upgrade status + mcaUpgradeStatus upgradeStatus +} + +type upgradeStatus int + +const ( + // mca desired configs not synced from desiredConfigs yet + toupgrade upgradeStatus = iota + // mca desired configs upgraded but last applied configs not yet upgraded + upgrading + // both desired configs and last applied configs are upgraded + upgraded +) + +type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference + +// setUpgradeStatus sets the addon upgrade status +func (n *addonNode) setUpgradeStatus() { + if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) { + n.mcaUpgradeStatus = toupgrade + return + } + + for _, actual := range n.mca.Status.ConfigReferences { + if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok { + if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) { + n.mcaUpgradeStatus = toupgrade + return + } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) { + n.mcaUpgradeStatus = upgrading + return + } + } else { + n.mcaUpgradeStatus = toupgrade + return + } + } + + n.mcaUpgradeStatus = upgraded +} + +func (d addonConfigMap) copy() addonConfigMap { + output := addonConfigMap{} + for k, v := range d { + output[k] = v + } + return output +} + +func newGraph(supportedConfigs []addonv1alpha1.ConfigMeta, defaultConfigReferences []addonv1alpha1.DefaultConfigReference) *configurationGraph { + graph := &configurationGraph{ + nodes: []*installStrategyNode{}, + defaults: &installStrategyNode{ + maxConcurrency: maxMaxConcurrency, + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{}, + children: map[string]*addonNode{}, + }, + } + + // init graph.defaults.desiredConfigs with supportedConfigs + for _, config := range supportedConfigs { + if config.DefaultConfig != nil { +
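// The default node is seeded with a ConfigReference whose DesiredConfig
// carries only the referent; the SpecHash is left empty here and copied from
// status.defaultConfigReferences in the loop just below. For example, a
// supported config {core/Foo, default "test"} yields
// desiredConfigs[{core,Foo}] = {ConfigReferent: "test", DesiredConfig: {"test", ""}}
// until a default config reference supplies the hash (see graph_test.go).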
graph.defaults.desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: *config.DefaultConfig, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: *config.DefaultConfig, + }, + } + } + } + // copy the spechash from cma status defaultConfigReferences + for _, configRef := range defaultConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + defaultsDesiredConfig, ok := graph.defaults.desiredConfigs[configRef.ConfigGroupResource] + if ok && (defaultsDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + defaultsDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + + return graph +} + +// addAddonNode adds an mca to the graph, starting from the placement with the highest order +func (g *configurationGraph) addAddonNode(mca *addonv1alpha1.ManagedClusterAddOn) { + for i := len(g.nodes) - 1; i >= 0; i-- { + if g.nodes[i].clusters.Has(mca.Namespace) { + g.nodes[i].addNode(mca) + return + } + } + + g.defaults.addNode(mca) +} + +// addPlacementNode deletes clusters from existing nodes so the new configuration overrides the previous one +func (g *configurationGraph) addPlacementNode( + installStrategy addonv1alpha1.PlacementStrategy, + installProgression addonv1alpha1.InstallProgression, + clusters []string, +) { + placementRef := installProgression.PlacementRef + installConfigReference := installProgression.ConfigReferences + + node := &installStrategyNode{ + placementRef: placementRef, + maxConcurrency: maxMaxConcurrency, + desiredConfigs: g.defaults.desiredConfigs, + children: map[string]*addonNode{}, + clusters: sets.New[string](clusters...), + } + + // set max concurrency + if installStrategy.RolloutStrategy.Type == addonv1alpha1.AddonRolloutStrategyRollingUpdate { + if installStrategy.RolloutStrategy.RollingUpdate != nil { + node.maxConcurrency = installStrategy.RolloutStrategy.RollingUpdate.MaxConcurrency + } else { + node.maxConcurrency = defaultMaxConcurrency + } + } + + // override configuration by install strategy + if len(installConfigReference) > 0 { + node.desiredConfigs = node.desiredConfigs.copy() + for _, configRef := range installConfigReference { + if configRef.DesiredConfig == nil { + continue + } + node.desiredConfigs[configRef.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: configRef.ConfigGroupResource, + ConfigReferent: configRef.DesiredConfig.ConfigReferent, + DesiredConfig: configRef.DesiredConfig.DeepCopy(), + } + } + } + + // remove addons from defaults and other placements. + for _, cluster := range clusters { + if _, ok := g.defaults.children[cluster]; ok { + node.addNode(g.defaults.children[cluster].mca) + delete(g.defaults.children, cluster) + } + for _, placement := range g.nodes { + if _, ok := placement.children[cluster]; ok { + node.addNode(placement.children[cluster].mca) + delete(placement.children, cluster) + } + } + } + g.nodes = append(g.nodes, node) +} + +func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]*installStrategyNode { + placementNodeMap := map[addonv1alpha1.PlacementRef]*installStrategyNode{} + for _, node := range g.nodes { + placementNodeMap[node.placementRef] = node + } + + return placementNodeMap +} + +func (g *configurationGraph) addonToUpdate() []*addonNode { + var addons []*addonNode + for _, node := range g.nodes { + addons = append(addons, node.addonToUpdate()...) + } + + addons = append(addons, g.defaults.addonToUpdate()...)
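// Placement nodes are flushed first, each throttled by its own
// maxConcurrency budget; the default node was built with maxMaxConcurrency
// ("100%") in newGraph, so addons outside any placement are never held back.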
+ + return addons +} + +func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn) { + n.children[addon.Namespace] = &addonNode{ + mca: addon, + desiredConfigs: n.desiredConfigs, + } + + // override configuration by mca spec + if len(addon.Spec.Configs) > 0 { + n.children[addon.Namespace].desiredConfigs = n.children[addon.Namespace].desiredConfigs.copy() + // TODO we should also filter out the configs which are not supported configs. + for _, config := range addon.Spec.Configs { + n.children[addon.Namespace].desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: config.ConfigReferent, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: config.ConfigReferent, + }, + } + // copy the spechash from mca status + for _, configRef := range addon.Status.ConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + nodeDesiredConfig, ok := n.children[addon.Namespace].desiredConfigs[configRef.ConfigGroupResource] + if ok && (nodeDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + nodeDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + } + } + + // set addon node upgrade status + n.children[addon.Namespace].setUpgradeStatus() +} + +func (n *installStrategyNode) addonUpgraded() int { + count := 0 + for _, addon := range n.children { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgraded { + count += 1 + } + } + return count +} + +func (n *installStrategyNode) addonUpgrading() int { + count := 0 + for _, addon := range n.children { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgrading { + count += 1 + } + } + return count +} + +// addonToUpdate finds the addons to be updated by placement +func (n *installStrategyNode) addonToUpdate() []*addonNode { + var addons []*addonNode + + // sort the children by key + keys := make([]string, 0, len(n.children)) + for k := range n.children { + keys = append(keys, k) + } + sort.Strings(keys) + + total := len(n.clusters) + if total == 0 { + total = len(n.children) + } + + length, _ := parseMaxConcurrency(n.maxConcurrency, total) + if length == 0 { + return addons + } + + for i, k := range keys { + if (i%length == 0) && len(addons) > 0 { + return addons + } + + addon := n.children[k] + if addon.mcaUpgradeStatus != upgraded { + addons = append(addons, addon) + } + } + + return addons +} + +func parseMaxConcurrency(maxConcurrency intstr.IntOrString, total int) (int, error) { + var length int + + switch maxConcurrency.Type { + case intstr.String: + str := maxConcurrency.StrVal + f, err := strconv.ParseFloat(str[:len(str)-1], 64) + if err != nil { + return length, err + } + length = int(math.Ceil(f / 100 * float64(total))) + case intstr.Int: + length = maxConcurrency.IntValue() + default: + return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type) + } + + return length, nil +} + +func desiredConfigsEqual(a, b addonConfigMap) bool { + if len(a) != len(b) { + return false + } + + for configgrA := range a { + if a[configgrA] != b[configgrA] { + return false + } + } + + return true +} diff --git a/pkg/addon/controllers/addonconfiguration/graph_test.go b/pkg/addon/controllers/addonconfiguration/graph_test.go new file mode 100644 index 000000000..707977566 --- /dev/null +++ b/pkg/addon/controllers/addonconfiguration/graph_test.go @@ -0,0 +1,467 @@ +package 
addonconfiguration + +import ( + "reflect" + "testing" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +type placementDesicion struct { + addonv1alpha1.PlacementRef + clusters []string +} + +func TestConfigurationGraph(t *testing.T) { + cases := []struct { + name string + defaultConfigs []addonv1alpha1.ConfigMeta + defaultConfigReference []addonv1alpha1.DefaultConfigReference + addons []*addonv1alpha1.ManagedClusterAddOn + placementDesicions []placementDesicion + placementStrategies []addonv1alpha1.PlacementStrategy + installProgressions []addonv1alpha1.InstallProgression + expected []*addonNode + }{ + { + name: "no output", + expected: nil, + }, + { + name: "default config only", + defaultConfigs: []addonv1alpha1.ConfigMeta{ + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}}, + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + }, + defaultConfigReference: []addonv1alpha1.DefaultConfigReference{ + newDefaultConfigReference("core", "Foo", "test", ""), + }, + addons: []*addonv1alpha1.ManagedClusterAddOn{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + }, + expected: []*addonNode{ + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster1"), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster2"), + }, + }, + }, + { + name: "with placement strategy", + defaultConfigs: []addonv1alpha1.ConfigMeta{ + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + }, + defaultConfigReference: []addonv1alpha1.DefaultConfigReference{ + newDefaultConfigReference("core", "Bar", "test", ""), + newDefaultConfigReference("core", "Foo", "test", ""), + }, + addons: []*addonv1alpha1.ManagedClusterAddOn{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + addontesting.NewAddon("test", "cluster3"), + }, + placementDesicions: []placementDesicion{ + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + clusters: []string{"cluster1"}}, + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}, + clusters: []string{"cluster2"}}, + }, + placementStrategies: []addonv1alpha1.PlacementStrategy{ + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}, + 
{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}}, + }, + installProgressions: []addonv1alpha1.InstallProgression{ + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + newInstallConfigReference("core", "Bar", "test1", ""), + }, + }, + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + newInstallConfigReference("core", "Bar", "test2", ""), + newInstallConfigReference("core", "Foo", "test2", ""), + }, + }, + }, + expected: []*addonNode{ + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster1"), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster2"), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster3"), + }, + }, + }, + { + name: "placement overlap", + defaultConfigs: []addonv1alpha1.ConfigMeta{ + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + }, + defaultConfigReference: []addonv1alpha1.DefaultConfigReference{ + 
newDefaultConfigReference("core", "Bar", "test", ""), + newDefaultConfigReference("core", "Foo", "test", ""), + }, + addons: []*addonv1alpha1.ManagedClusterAddOn{ + addontesting.NewAddon("test", "cluster1"), + addontesting.NewAddon("test", "cluster2"), + addontesting.NewAddon("test", "cluster3"), + }, + placementStrategies: []addonv1alpha1.PlacementStrategy{ + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}, + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}}, + }, + placementDesicions: []placementDesicion{ + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + clusters: []string{"cluster1", "cluster2"}}, + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}, + clusters: []string{"cluster2", "cluster3"}}, + }, + installProgressions: []addonv1alpha1.InstallProgression{ + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + newInstallConfigReference("core", "Bar", "test1", ""), + }, + }, + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + newInstallConfigReference("core", "Bar", "test2", ""), + newInstallConfigReference("core", "Foo", "test2", ""), + }, + }, + }, + expected: []*addonNode{ + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster1"), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster2"), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: 
"core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster3"), + }, + }, + }, + { + name: "mca override", + defaultConfigs: []addonv1alpha1.ConfigMeta{ + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}}, + }, + defaultConfigReference: []addonv1alpha1.DefaultConfigReference{ + newDefaultConfigReference("core", "Bar", "test", ""), + newDefaultConfigReference("core", "Foo", "test", ""), + }, + addons: []*addonv1alpha1.ManagedClusterAddOn{ + newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{ + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}}, + }, nil), + addontesting.NewAddon("test", "cluster2"), + addontesting.NewAddon("test", "cluster3"), + }, + placementStrategies: []addonv1alpha1.PlacementStrategy{ + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}, + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}}, + }, + placementDesicions: []placementDesicion{ + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + clusters: []string{"cluster1"}}, + {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}, + clusters: []string{"cluster2"}}, + }, + installProgressions: []addonv1alpha1.InstallProgression{ + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + newInstallConfigReference("core", "Foo", "test1", ""), + }, + }, + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + newInstallConfigReference("core", "Bar", "test2", ""), + newInstallConfigReference("core", "Foo", "test2", ""), + }, + }, + }, + expected: []*addonNode{ + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "", + }, + }, + }, + mca: newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{ + {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}}, + }, nil), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", 
Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster2"), + }, + { + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{ + {Group: "core", Resource: "Bar"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + {Group: "core", Resource: "Foo"}: { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "", + }, + }, + }, + mca: addontesting.NewAddon("test", "cluster3"), + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + graph := newGraph(c.defaultConfigs, c.defaultConfigReference) + for _, addon := range c.addons { + graph.addAddonNode(addon) + } + for i, decision := range c.placementDesicions { + graph.addPlacementNode(c.placementStrategies[i], c.installProgressions[i], decision.clusters) + } + + actual := graph.addonToUpdate() + if len(actual) != len(c.expected) { + t.Errorf("output length is not correct, expected %v, got %v", len(c.expected), len(actual)) + } + + for _, ev := range c.expected { + compared := false + for _, v := range actual { + if v == nil || ev == nil { + t.Errorf("addonNode should not be nil") + } + if ev.mca != nil && v.mca != nil && ev.mca.Namespace == v.mca.Namespace { + if !reflect.DeepEqual(v, ev) { + t.Errorf("output is not correct, cluster %s, expected %v, got %v", v.mca.Namespace, ev, v) + } + compared = true + } + } + + if !compared { + t.Errorf("not found addonNode %v", ev.mca) + } + } + }) + } +} + +func newInstallConfigReference(group, resource, name, hash string) addonv1alpha1.InstallConfigReference { + return addonv1alpha1.InstallConfigReference{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ + Group: group, + Resource: resource, + }, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: name}, + SpecHash: hash, + }, + } +} + +func newDefaultConfigReference(group, resource, name, hash string) addonv1alpha1.DefaultConfigReference { + return addonv1alpha1.DefaultConfigReference{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ + Group: group, + Resource: resource, + }, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: name}, + SpecHash: hash, + }, + } +} diff --git a/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go b/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go new file mode 100644 index 000000000..4c44d700e --- /dev/null +++ b/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go @@ -0,0 +1,139 @@ +package 
addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type clusterManagementAddonProgressingReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d *clusterManagementAddonProgressingReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + cmaCopy := cma.DeepCopy() + placementNodes := graph.getPlacementNodes() + + // go through addons and update condition per install progression + for i, installProgression := range cmaCopy.Status.InstallProgressions { + placementNode, exist := placementNodes[installProgression.PlacementRef] + if !exist { + continue + } + + isUpgrade := false + + for _, configReference := range installProgression.ConfigReferences { + if configReference.LastAppliedConfig != nil { + isUpgrade = true + break + } + } + + setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i], + isUpgrade, + placementNode.addonUpgrading(), + placementNode.addonUpgraded(), + len(placementNode.clusters), + ) + } + + err := d.patchMgmtAddonStatus(ctx, cmaCopy, cma) + if err != nil { + errs = append(errs, err) + } + return cmaCopy, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *clusterManagementAddonProgressingReconciler) patchMgmtAddonStatus(ctx context.Context, new, old *addonv1alpha1.ClusterManagementAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + oldData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: old.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: new.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching clustermanagementaddon %s status with %s", new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} + +func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1.InstallProgression, isUpgrade bool, progressing, done, total int) { + // always update progressing condition when there is no config + // skip update progressing condition when last applied config already the same as desired + skip := len(installProgression.ConfigReferences) > 0 + for _, configReference := range installProgression.ConfigReferences { + if !equality.Semantic.DeepEqual(configReference.LastAppliedConfig, configReference.DesiredConfig) && + 
!equality.Semantic.DeepEqual(configReference.LastKnownGoodConfig, configReference.DesiredConfig) { + skip = false + } + } + if skip { + return + } + condition := metav1.Condition{ + Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, + } + if (total == 0 && done == 0) || (done != total) { + condition.Status = metav1.ConditionTrue + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgrading + condition.Message = fmt.Sprintf("%d/%d upgrading...", progressing+done, total) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstalling + condition.Message = fmt.Sprintf("%d/%d installing...", progressing+done, total) + } + } else { + for i, configRef := range installProgression.ConfigReferences { + installProgression.ConfigReferences[i].LastAppliedConfig = configRef.DesiredConfig.DeepCopy() + installProgression.ConfigReferences[i].LastKnownGoodConfig = configRef.DesiredConfig.DeepCopy() + } + condition.Status = metav1.ConditionFalse + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed + condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors.", done, total) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed + condition.Message = fmt.Sprintf("%d/%d install completed with no errors.", done, total) + } + } + meta.SetStatusCondition(&installProgression.Conditions, condition) +} diff --git a/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go new file mode 100644 index 000000000..24827fa58 --- /dev/null +++ b/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go @@ -0,0 +1,628 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "testing" + "time" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clienttesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" + "open-cluster-management.io/addon-framework/pkg/index" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake" + clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +func TestMgmtAddonProgressingReconcile(t *testing.T) { + cases := []struct { + name string + managedClusteraddon []runtime.Object + clusterManagementAddon []runtime.Object + placements []runtime.Object + placementDecisions []runtime.Object + validateAddonActions func(t *testing.T, actions []clienttesting.Action) + expectErr bool + }{ + { + name: "no managedClusteraddon", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). 
+ WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig != nil { + t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Reason) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/2 installing..." { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message) + } + }, + }, + { + name: "no placement", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(). 
+ WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build()}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "update clustermanagementaddon status with condition Progressing installing", + managedClusteraddon: []runtime.Object{func() *addonv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + } + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). + WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig != nil { + t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Reason) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..." 
{ + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message) + } + }, + }, + { + name: "update clustermanagementaddon status with condition Progressing install succeed", + managedClusteraddon: []runtime.Object{func() *addonv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + } + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). + WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstallSucceed { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 install completed with no errors." 
{ + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + }, + }, + { + name: "update clustermanagementaddon status with condition Progressing upgrading", + managedClusteraddon: []runtime.Object{func() *addonv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + } + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). + WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgrading { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 upgrading..." 
{ + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + }, + }, + { + name: "update clustermanagementaddon status with condition Progressing upgrade succeed", + managedClusteraddon: []runtime.Object{func() *addonv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + } + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). + WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgradeSucceed { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != 
"1/1 upgrade completed with no errors." { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + }, + }, + { + name: "mca override cma configs", + managedClusteraddon: []runtime.Object{func() *addonv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Spec.Configs = []addonv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "testmca"}, + }, + } + addon.Status.ConfigReferences = []addonv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "testmca"}, + SpecHash: "hashmca", + }, + LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "testmca"}, + SpecHash: "hashmca", + }, + }, + } + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). + WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgrading { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/1 upgrading..." 
{ + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + }, + }, + { + name: "update clustermanagementaddon status with condition Progressing ConfigurationUnsupported", + managedClusteraddon: []runtime.Object{func() *addonv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + } + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", ""). + WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}). + WithInstallProgression(addonv1alpha1.InstallProgression{ + PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}, + ConfigReferences: []addonv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, + SpecHash: "hash1", + }, + }, + }, + }).Build()}, + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "placement1", + Namespace: "test", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 0 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig != nil { + t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { + t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) + } + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) + } + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..." { + t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message) + } + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + obj := append(c.clusterManagementAddon, c.managedClusteraddon...) + clusterObj := append(c.placements, c.placementDecisions...) + fakeClusterClient := fakecluster.NewSimpleClientset(clusterObj...) 
+ fakeAddonClient := fakeaddon.NewSimpleClientset(obj...) + + addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute) + clusterInformers := clusterv1informers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute) + + err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ManagedClusterAddonByName: index.IndexManagedClusterAddonByName, + }) + if err != nil { + t.Fatal(err) + } + + for _, obj := range c.placements { + if err := clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + for _, obj := range c.placementDecisions { + if err := clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + for _, obj := range c.clusterManagementAddon { + if err = addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + for _, obj := range c.managedClusteraddon { + if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + controller := &addonConfigurationController{ + addonClient: fakeAddonClient, + placementDecisionLister: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister(), + placementLister: clusterInformers.Cluster().V1beta1().Placements().Lister(), + clusterManagementAddonLister: addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Lister(), + managedClusterAddonIndexer: addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetIndexer(), + } + + reconcile := &clusterManagementAddonProgressingReconciler{ + addonClient: fakeAddonClient, + } + + for _, obj := range c.clusterManagementAddon { + graph, err := controller.buildConfigurationGraph(obj.(*addonv1alpha1.ClusterManagementAddOn)) + if err != nil { + t.Errorf("expected no error when build graph: %v", err) + } + _, _, err = reconcile.reconcile(context.TODO(), obj.(*addonv1alpha1.ClusterManagementAddOn), graph) + if err != nil && !c.expectErr { + t.Errorf("expected no error when sync: %v", err) + } + if err == nil && c.expectErr { + t.Errorf("Expect error but got no error") + } + } + + c.validateAddonActions(t, fakeAddonClient.Actions()) + }) + } +} diff --git a/pkg/addon/controllers/addonmanagement/addon_install_reconciler.go b/pkg/addon/controllers/addonmanagement/addon_install_reconciler.go new file mode 100644 index 000000000..13e403c47 --- /dev/null +++ b/pkg/addon/controllers/addonmanagement/addon_install_reconciler.go @@ -0,0 +1,120 @@ +package addonmanagement + +import ( + "context" + + "github.com/openshift/library-go/pkg/controller/factory" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + "open-cluster-management.io/addon-framework/pkg/index" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +type managedClusterAddonInstallReconciler struct { + addonClient addonv1alpha1client.Interface + managedClusterAddonIndexer cache.Indexer + 
placementLister             clusterlisterv1beta1.PlacementLister
+	placementDecisionLister     clusterlisterv1beta1.PlacementDecisionLister
+	addonFilterFunc             factory.EventFilterFunc
+}
+
+func (d *managedClusterAddonInstallReconciler) reconcile(
+	ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) {
+	// Skip applying the install strategy for self-managed addons. This avoids a
+	// conflict when the addon itself defines WithInstallStrategy(); the filter
+	// will be removed once WithInstallStrategy() is removed from the framework.
+	if !d.addonFilterFunc(cma) {
+		return cma, reconcileContinue, nil
+	}
+
+	if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual {
+		return cma, reconcileContinue, nil
+	}
+
+	addons, err := d.managedClusterAddonIndexer.ByIndex(index.ManagedClusterAddonByName, cma.Name)
+	if err != nil {
+		return cma, reconcileContinue, err
+	}
+
+	existingDeployed := sets.Set[string]{}
+	for _, addonObject := range addons {
+		addon := addonObject.(*addonv1alpha1.ManagedClusterAddOn)
+		existingDeployed.Insert(addon.Namespace)
+	}
+
+	requiredDeployed, err := d.getAllDecisions(cma.Name, cma.Spec.InstallStrategy.Placements)
+	if err != nil {
+		return cma, reconcileContinue, err
+	}
+
+	// Reconcile by set difference: create the addon on newly selected clusters
+	// and delete it from clusters that are no longer selected.
+	owner := metav1.NewControllerRef(cma, addonv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn"))
+	toAdd := requiredDeployed.Difference(existingDeployed)
+	toRemove := existingDeployed.Difference(requiredDeployed)
+
+	var errs []error
+	for cluster := range toAdd {
+		_, err := d.addonClient.AddonV1alpha1().ManagedClusterAddOns(cluster).Create(ctx, &addonv1alpha1.ManagedClusterAddOn{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            cma.Name,
+				Namespace:       cluster,
+				OwnerReferences: []metav1.OwnerReference{*owner},
+			},
+			Spec: addonv1alpha1.ManagedClusterAddOnSpec{},
+		}, metav1.CreateOptions{})
+
+		if err != nil && !errors.IsAlreadyExists(err) {
+			errs = append(errs, err)
+		}
+	}
+
+	for cluster := range toRemove {
+		err := d.addonClient.AddonV1alpha1().ManagedClusterAddOns(cluster).Delete(ctx, cma.Name, metav1.DeleteOptions{})
+		if err != nil && !errors.IsNotFound(err) {
+			errs = append(errs, err)
+		}
+	}
+
+	return cma, reconcileContinue, utilerrors.NewAggregate(errs)
+}
+
+// getAllDecisions unions the cluster names selected by every placement in the
+// install strategy; a missing placement is logged and skipped rather than
+// treated as an error.
+func (d *managedClusterAddonInstallReconciler) getAllDecisions(addonName string, placements []addonv1alpha1.PlacementStrategy) (sets.Set[string], error) {
+	var errs []error
+	required := sets.Set[string]{}
+	for _, strategy := range placements {
+		_, err := d.placementLister.Placements(strategy.PlacementRef.Namespace).Get(strategy.PlacementRef.Name)
+		if errors.IsNotFound(err) {
+			klog.V(2).Infof("placement %s/%s is not found for addon %s", strategy.PlacementRef.Namespace, strategy.PlacementRef.Name, addonName)
+			continue
+		}
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+
+		decisionSelector := labels.SelectorFromSet(labels.Set{
+			clusterv1beta1.PlacementLabel: strategy.PlacementRef.Name,
+		})
+		decisions, err := d.placementDecisionLister.PlacementDecisions(strategy.PlacementRef.Namespace).List(decisionSelector)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+
+		for _, decision := range decisions {
+			for _, sd := range decision.Status.Decisions {
+				required.Insert(sd.ClusterName)
+			}
+		}
+	}
+
+	return required, utilerrors.NewAggregate(errs)
+}
diff --git a/pkg/addon/controllers/addonmanagement/controller.go b/pkg/addon/controllers/addonmanagement/controller.go
new file mode 100644
index 000000000..516e36895
--- /dev/null
+++ b/pkg/addon/controllers/addonmanagement/controller.go
@@ -0,0 +1,117 @@
+package addonmanagement
+
+import (
+	"context"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+
+	"open-cluster-management.io/addon-framework/pkg/index"
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+	clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1"
+)
+
+type addonManagementController struct {
+	addonClient                   addonv1alpha1client.Interface
+	clusterManagementAddonLister  addonlisterv1alpha1.ClusterManagementAddOnLister
+	clusterManagementAddonIndexer cache.Indexer
+
+	reconcilers []addonManagementReconcile
+}
+
+// addonManagementReconcile is an interface for reconcile logic. It creates
+// ManagedClusterAddOns based on the install strategy.
+type addonManagementReconcile interface {
+	reconcile(ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error)
+}
+
+type reconcileState int64
+
+const (
+	reconcileStop reconcileState = iota
+	reconcileContinue
+)
+
+func NewAddonManagementController(
+	addonClient addonv1alpha1client.Interface,
+	addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer,
+	clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer,
+	placementInformer clusterinformersv1beta1.PlacementInformer,
+	placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer,
+	addonFilterFunc factory.EventFilterFunc,
+	recorder events.Recorder,
+) factory.Controller {
+	c := &addonManagementController{
+		addonClient:                   addonClient,
+		clusterManagementAddonLister:  clusterManagementAddonInformers.Lister(),
+		clusterManagementAddonIndexer: clusterManagementAddonInformers.Informer().GetIndexer(),
+
+		reconcilers: []addonManagementReconcile{
+			&managedClusterAddonInstallReconciler{
+				addonClient:                addonClient,
+				placementDecisionLister:    placementDecisionInformer.Lister(),
+				placementLister:            placementInformer.Lister(),
+				managedClusterAddonIndexer: addonInformers.Informer().GetIndexer(),
+				addonFilterFunc:            addonFilterFunc,
+			},
+		},
+	}
+
+	return factory.New().WithInformersQueueKeysFunc(
+		func(obj runtime.Object) []string {
+			key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+			return []string{key}
+		},
+		addonInformers.Informer(), clusterManagementAddonInformers.Informer()).
+		WithInformersQueueKeysFunc(
+			index.ClusterManagementAddonByPlacementDecisionQueueKey(
+				clusterManagementAddonInformers),
+			placementDecisionInformer.Informer()).
+		WithInformersQueueKeysFunc(
+			index.ClusterManagementAddonByPlacementQueueKey(
+				clusterManagementAddonInformers),
+			placementInformer.Informer()).
+		WithSync(c.sync).ToController("addon-management-controller", recorder)
+}
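+
+// sync resolves a queue key back to a ClusterManagementAddOn, deep-copies it,
+// and runs it through the reconciler chain; a reconciler returning
+// reconcileStop short-circuits the reconcilers after it.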
+func (c *addonManagementController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+	key := syncCtx.QueueKey()
+	_, addonName, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		// ignore addon whose key is invalid
+		return nil
+	}
+
+	klog.V(4).Infof("Reconciling addon %q", addonName)
+
+	cma, err := c.clusterManagementAddonLister.Get(addonName)
+	switch {
+	case errors.IsNotFound(err):
+		return nil
+	case err != nil:
+		return err
+	}
+
+	cma = cma.DeepCopy()
+
+	var state reconcileState
+	var errs []error
+	for _, reconciler := range c.reconcilers {
+		cma, state, err = reconciler.reconcile(ctx, cma)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		if state == reconcileStop {
+			break
+		}
+	}
+
+	return utilerrors.NewAggregate(errs)
+}
diff --git a/pkg/addon/controllers/addonmanagement/controller_test.go b/pkg/addon/controllers/addonmanagement/controller_test.go
new file mode 100644
index 000000000..af61b4ce5
--- /dev/null
+++ b/pkg/addon/controllers/addonmanagement/controller_test.go
@@ -0,0 +1,274 @@
+package addonmanagement
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	clienttesting "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/cache"
+
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting"
+	"open-cluster-management.io/addon-framework/pkg/agent"
+	"open-cluster-management.io/addon-framework/pkg/index"
+	"open-cluster-management.io/addon-framework/pkg/utils"
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake"
+	addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions"
+	fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
+	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
+)
+
+func TestAddonInstallReconcile(t *testing.T) {
+	cases := []struct {
+		name                   string
+		managedClusteraddon    []runtime.Object
+		clusterManagementAddon *addonv1alpha1.ClusterManagementAddOn
+		placements             []runtime.Object
+		placementDecisions     []runtime.Object
+		validateAddonActions   func(t *testing.T, actions []clienttesting.Action)
+		expectErr              bool
+	}{
+		{
+			name:                   "no installStrategy",
+			managedClusteraddon:    []runtime.Object{},
+			clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").Build(),
+			placements:             []runtime.Object{},
+			placementDecisions:     []runtime.Object{},
+			validateAddonActions:   addontesting.AssertNoActions,
+		},
+		{
+			name:                "manual installStrategy",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: func() *addonv1alpha1.ClusterManagementAddOn {
+				addon := addontesting.NewClusterManagementAddon("test", "", "").Build()
+				addon.Spec.InstallStrategy = addonv1alpha1.InstallStrategy{
+					Type: addonv1alpha1.AddonInstallStrategyManual,
+				}
+				return addon
+			}(),
+			placements:           []runtime.Object{},
+			placementDecisions:   []runtime.Object{},
+			validateAddonActions: addontesting.AssertNoActions,
+		},
+		{
+			name:                "placement is missing",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: func() *addonv1alpha1.ClusterManagementAddOn {
+				addon := addontesting.NewClusterManagementAddon("test", "", "").Build()
+				addon.Spec.InstallStrategy = addonv1alpha1.InstallStrategy{
+					Type: addonv1alpha1.AddonInstallStrategyPlacements,
+					Placements: []addonv1alpha1.PlacementStrategy{
+						{
+							PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
+						},
+					},
+				}
+				return addon
+			}(),
+			placements:           []runtime.Object{},
+			placementDecisions:   []runtime.Object{},
+			validateAddonActions: addontesting.AssertNoActions,
+		},
+		{
+			name:                "placement decision is missing",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: func() *addonv1alpha1.ClusterManagementAddOn {
+				addon := addontesting.NewClusterManagementAddon("test", "", "").Build()
+				addon.Spec.InstallStrategy = addonv1alpha1.InstallStrategy{
+					Type: addonv1alpha1.AddonInstallStrategyPlacements,
+					Placements: []addonv1alpha1.PlacementStrategy{
+						{
+							PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
+						},
+					},
+				}
+				return addon
+			}(),
+			placements: []runtime.Object{
+				&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
+			},
+			placementDecisions:   []runtime.Object{},
+			validateAddonActions: addontesting.AssertNoActions,
+		},
+		{
+			name:                "install addon",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: func() *addonv1alpha1.ClusterManagementAddOn {
+				addon := addontesting.NewClusterManagementAddon("test", "", "").Build()
+				addon.Spec.InstallStrategy = addonv1alpha1.InstallStrategy{
+					Type: addonv1alpha1.AddonInstallStrategyPlacements,
+					Placements: []addonv1alpha1.PlacementStrategy{
+						{
+							PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
+						},
+					},
+				}
+				return addon
+			}(),
+			placements: []runtime.Object{
+				&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
+			},
+			placementDecisions: []runtime.Object{
+				&clusterv1beta1.PlacementDecision{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-placement",
+						Namespace: "default",
+						Labels:    map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
+					},
+					Status: clusterv1beta1.PlacementDecisionStatus{
+						Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
+					},
+				},
+			},
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "create", "create")
+			},
+		},
+		{
+			name: "add and remove addon",
+			managedClusteraddon: []runtime.Object{
+				addontesting.NewAddon("test", "cluster0"),
+				addontesting.NewAddon("test", "cluster1"),
+			},
+			clusterManagementAddon: func() *addonv1alpha1.ClusterManagementAddOn {
+				addon := addontesting.NewClusterManagementAddon("test", "", "").Build()
+				addon.Spec.InstallStrategy = addonv1alpha1.InstallStrategy{
+					Type: addonv1alpha1.AddonInstallStrategyPlacements,
+					Placements: []addonv1alpha1.PlacementStrategy{
+						{
+							PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
+						},
+					},
+				}
+				return addon
+			}(),
+			placements: []runtime.Object{
+				&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
+			},
+			placementDecisions: []runtime.Object{
+				&clusterv1beta1.PlacementDecision{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-placement",
+						Namespace: "default",
+						Labels:    map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
+					},
+					Status: clusterv1beta1.PlacementDecisionStatus{
+						Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
+					},
+				},
+			},
+			validateAddonActions: func(t
*testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "create", "delete") + }, + }, + { + name: "multiple placements", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster0"), + addontesting.NewAddon("test", "cluster1"), + }, + clusterManagementAddon: func() *addonv1alpha1.ClusterManagementAddOn { + addon := addontesting.NewClusterManagementAddon("test", "", "").Build() + addon.Spec.InstallStrategy = addonv1alpha1.InstallStrategy{ + Type: addonv1alpha1.AddonInstallStrategyPlacements, + Placements: []addonv1alpha1.PlacementStrategy{ + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + }, + { + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement1", Namespace: "default"}, + }, + }, + } + return addon + }(), + placements: []runtime.Object{ + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, + &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement1", Namespace: "default"}}, + }, + placementDecisions: []runtime.Object{ + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}, + }, + }, + &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement1", + Namespace: "default", + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement1"}, + }, + Status: clusterv1beta1.PlacementDecisionStatus{ + Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}}, + }, + }, + }, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "create", "create", "delete") + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + clusterObj := append(c.placements, c.placementDecisions...) + fakeClusterClient := fakecluster.NewSimpleClientset(clusterObj...) + fakeAddonClient := fakeaddon.NewSimpleClientset(c.managedClusteraddon...) 
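+
+			// The informer factories below are never started in these unit tests;
+			// fixtures are added straight into the informer stores so the listers
+			// and indexers can see them.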
+			addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute)
+			clusterInformers := clusterv1informers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute)
+
+			err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers(
+				cache.Indexers{
+					index.ManagedClusterAddonByName: index.IndexManagedClusterAddonByName,
+				})
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			for _, obj := range c.placements {
+				if err := clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			for _, obj := range c.placementDecisions {
+				if err := clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			for _, obj := range c.managedClusteraddon {
+				if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			reconcile := &managedClusterAddonInstallReconciler{
+				addonClient:                fakeAddonClient,
+				placementLister:            clusterInformers.Cluster().V1beta1().Placements().Lister(),
+				placementDecisionLister:    clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister(),
+				managedClusterAddonIndexer: addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetIndexer(),
+				addonFilterFunc:            utils.ManagedBySelf(map[string]agent.AgentAddon{"test": nil}),
+			}
+
+			_, _, err = reconcile.reconcile(context.TODO(), c.clusterManagementAddon)
+			if err != nil && !c.expectErr {
+				t.Errorf("expected no error when sync: %v", err)
+			}
+			if err == nil && c.expectErr {
+				t.Errorf("Expect error but got no error")
+			}
+			c.validateAddonActions(t, fakeAddonClient.Actions())
+		})
+	}
+}
diff --git a/pkg/addon/controllers/addonowner/controller.go b/pkg/addon/controllers/addonowner/controller.go
new file mode 100644
index 000000000..36d69d847
--- /dev/null
+++ b/pkg/addon/controllers/addonowner/controller.go
@@ -0,0 +1,105 @@
+package addonowner
+
+import (
+	"context"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+
+	"open-cluster-management.io/addon-framework/pkg/utils"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+)
+
+const UnsupportedConfigurationType = "UnsupportedConfiguration"
+
+// addonOwnerController reconciles instances of ManagedClusterAddOn on the hub
+// to add the related ClusterManagementAddOn as their owner.
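+// Owning the addon means that deleting a ClusterManagementAddOn lets
+// Kubernetes garbage collection clean up its per-cluster ManagedClusterAddOns.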
+type addonOwnerController struct { + addonClient addonv1alpha1client.Interface + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + addonFilterFunc factory.EventFilterFunc +} + +func NewAddonOwnerController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + addonFilterFunc factory.EventFilterFunc, + recorder events.Recorder, +) factory.Controller { + c := &addonOwnerController{ + addonClient: addonClient, + managedClusterAddonLister: addonInformers.Lister(), + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + addonFilterFunc: addonFilterFunc, + } + + return factory.New(). + WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, clusterManagementAddonInformers.Informer()). + WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()). + WithSync(c.sync). + ToController("addon-owner-controller", recorder) +} + +func (c *addonOwnerController) sync(ctx context.Context, syncCtx factory.SyncContext) error { + key := syncCtx.QueueKey() + klog.V(4).Infof("Reconciling addon %q", key) + + namespace, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(namespace).Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + addonCopy := addon.DeepCopy() + modified := false + + clusterManagementAddon, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + if !c.addonFilterFunc(clusterManagementAddon) { + return nil + } + + owner := metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) + modified = utils.MergeOwnerRefs(&addonCopy.OwnerReferences, *owner, false) + if modified { + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Update(ctx, addonCopy, metav1.UpdateOptions{}) + return err + } + + return nil +} diff --git a/pkg/addon/controllers/addonowner/controller_test.go b/pkg/addon/controllers/addonowner/controller_test.go new file mode 100644 index 000000000..14a49c226 --- /dev/null +++ b/pkg/addon/controllers/addonowner/controller_test.go @@ -0,0 +1,94 @@ +package addonowner + +import ( + "context" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clienttesting "k8s.io/client-go/testing" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" + "open-cluster-management.io/addon-framework/pkg/utils" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + + testingcommon "open-cluster-management.io/ocm/pkg/common/testing" +) + +func newClusterManagementOwner(name string) metav1.OwnerReference { + clusterManagementAddon := 
addontesting.NewClusterManagementAddon(name, "testcrd", "testcr").Build() + return *metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) +} + +func TestReconcile(t *testing.T) { + cases := []struct { + name string + syncKey string + managedClusteraddon []runtime.Object + clusterManagementAddon []runtime.Object + validateAddonActions func(t *testing.T, actions []clienttesting.Action) + }{ + { + name: "no clustermanagementaddon", + syncKey: "test/test", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "no managedclusteraddon to sync", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "update managedclusteraddon", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1", newClusterManagementOwner("test")), + }, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + validateAddonActions: addontesting.AssertNoActions, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + obj := append(c.clusterManagementAddon, c.managedClusteraddon...) + fakeAddonClient := fakeaddon.NewSimpleClientset(obj...) + + addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute) + + for _, obj := range c.managedClusteraddon { + if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + for _, obj := range c.clusterManagementAddon { + if err := addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().GetStore().Add(obj); err != nil { + t.Fatal(err) + } + } + + syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey) + recorder := syncContext.Recorder() + + controller := NewAddonOwnerController( + fakeAddonClient, + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + utils.ManagedByAddonManager, + recorder) + + err := controller.Sync(context.TODO(), syncContext) + if err != nil { + t.Errorf("expected no error when sync: %v", err) + } + c.validateAddonActions(t, fakeAddonClient.Actions()) + }) + } +} diff --git a/pkg/addon/controllers/addonprogressing/controller.go b/pkg/addon/controllers/addonprogressing/controller.go new file mode 100644 index 000000000..892afd9ec --- /dev/null +++ b/pkg/addon/controllers/addonprogressing/controller.go @@ -0,0 +1,344 @@ +package addonprogressing + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + 
"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + worklister "open-cluster-management.io/api/client/work/listers/work/v1" + workapiv1 "open-cluster-management.io/api/work/v1" +) + +const ( + ProgressingDoing string = "Doing" + ProgressingSucceed string = "Succeed" + ProgressingFailed string = "Failed" +) + +// addonProgressingController reconciles instances of managedclusteradd on the hub +// based to update the status progressing condition and last applied config +type addonProgressingController struct { + addonClient addonv1alpha1client.Interface + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + workLister worklister.ManifestWorkLister + addonFilterFunc factory.EventFilterFunc +} + +func NewAddonProgressingController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + workInformers workinformers.ManifestWorkInformer, + addonFilterFunc factory.EventFilterFunc, + recorder events.Recorder, +) factory.Controller { + c := &addonProgressingController{ + addonClient: addonClient, + managedClusterAddonLister: addonInformers.Lister(), + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + workLister: workInformers.Lister(), + addonFilterFunc: addonFilterFunc, + } + + return factory.New().WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer(), clusterManagementAddonInformers.Informer()). + // TODO: consider hosted manifestwork + WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + return []string{fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey])} + }, + workInformers.Informer()). 
+		WithSync(c.sync).ToController("addon-progressing-controller", recorder)
+}
+
+func (c *addonProgressingController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+	key := syncCtx.QueueKey()
+	klog.V(4).Infof("Reconciling addon %q", key)
+
+	namespace, addonName, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		// ignore addon whose key is invalid
+		return nil
+	}
+
+	addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(namespace).Get(addonName)
+	switch {
+	case errors.IsNotFound(err):
+		return nil
+	case err != nil:
+		return err
+	}
+
+	clusterManagementAddon, err := c.clusterManagementAddonLister.Get(addonName)
+	if errors.IsNotFound(err) {
+		return nil
+	}
+
+	if err != nil {
+		return err
+	}
+
+	if !c.addonFilterFunc(clusterManagementAddon) {
+		return nil
+	}
+
+	// update progressing condition and last applied config
+	return c.updateAddonProgressingAndLastApplied(ctx, addon.DeepCopy(), addon)
+}
+
+func (c *addonProgressingController) updateAddonProgressingAndLastApplied(ctx context.Context, newaddon, oldaddon *addonapiv1alpha1.ManagedClusterAddOn) error {
+	// check config references
+	if supported, config := isConfigurationSupported(newaddon); !supported {
+		meta.SetStatusCondition(&newaddon.Status.Conditions, metav1.Condition{
+			Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
+			Status:  metav1.ConditionFalse,
+			Reason:  addonapiv1alpha1.ProgressingReasonConfigurationUnsupported,
+			Message: fmt.Sprintf("Configuration with gvr %s/%s is not supported for this addon", config.Group, config.Resource),
+		})
+		return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+	}
+
+	// wait until addon has ManifestApplied condition
+	if cond := meta.FindStatusCondition(newaddon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied); cond == nil {
+		meta.SetStatusCondition(&newaddon.Status.Conditions, metav1.Condition{
+			Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
+			Status:  metav1.ConditionFalse,
+			Reason:  "WaitingForManifestApplied",
+			Message: "Waiting for ManagedClusterAddOn ManifestApplied condition",
+		})
+		return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+	}
+
+	// set the upgrade flag: an upgrade is in flight once any config has been applied before
+	isUpgrade := false
+	for _, configReference := range newaddon.Status.ConfigReferences {
+		if configReference.LastAppliedConfig != nil && configReference.LastAppliedConfig.SpecHash != "" {
+			isUpgrade = true
+			break
+		}
+	}
+
+	// get addon works
+	// TODO: consider hosted manifestwork
+	requirement, _ := labels.NewRequirement(addonapiv1alpha1.AddonLabelKey, selection.Equals, []string{newaddon.Name})
+	selector := labels.NewSelector().Add(*requirement)
+	addonWorks, err := c.workLister.ManifestWorks(newaddon.Namespace).List(selector)
+	if err != nil {
+		setAddOnProgressingAndLastApplied(isUpgrade, ProgressingFailed, err.Error(), newaddon)
+		return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+	}
+
+	if len(addonWorks) == 0 {
+		setAddOnProgressingAndLastApplied(isUpgrade, ProgressingDoing, "no addon works", newaddon)
+		return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+	}
+
+	// check addon manifestworks
+	for _, work := range addonWorks {
+		// skip pre-delete manifestwork
+		if strings.HasPrefix(work.Name, constants.PreDeleteHookWorkName(newaddon.Name)) {
+			continue
+		}
+
+		// check if the work configs match the addon configs
+		if !workConfigsMatchesAddon(work, newaddon) {
+			setAddOnProgressingAndLastApplied(isUpgrade, ProgressingDoing, "configs mismatch", newaddon)
+			return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+		}
+
+		// check if work is ready
+		if !workIsReady(work) {
+			setAddOnProgressingAndLastApplied(isUpgrade, ProgressingDoing, "work is not ready", newaddon)
+			return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+		}
+	}
+
+	// set lastAppliedConfig when all works match the addon configs and are ready.
+	setAddOnProgressingAndLastApplied(isUpgrade, ProgressingSucceed, "", newaddon)
+	return c.patchAddOnProgressingAndLastApplied(ctx, newaddon, oldaddon)
+}
+
+func (c *addonProgressingController) patchAddOnProgressingAndLastApplied(ctx context.Context, new, old *addonapiv1alpha1.ManagedClusterAddOn) error {
+	if equality.Semantic.DeepEqual(new.Status, old.Status) {
+		return nil
+	}
+
+	// Build a minimal merge patch covering only the status fields this
+	// controller owns (config references and conditions).
+	oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{
+		Status: addonapiv1alpha1.ManagedClusterAddOnStatus{
+			ConfigReferences: old.Status.ConfigReferences,
+			Conditions:       old.Status.Conditions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	// Including UID and resourceVersion makes the patch fail instead of being
+	// applied to an object that was deleted and recreated in the meantime.
+	newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:             new.UID,
+			ResourceVersion: new.ResourceVersion,
+		},
+		Status: addonapiv1alpha1.ManagedClusterAddOnStatus{
+			ConfigReferences: new.Status.ConfigReferences,
+			Conditions:       new.Status.Conditions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
+	if err != nil {
+		return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err)
+	}
+
+	klog.V(2).Infof("Patching addon %s/%s condition and last applied config with %s", new.Namespace, new.Name, string(patchBytes))
+	_, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(
+		ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
+
+func isConfigurationSupported(addon *addonapiv1alpha1.ManagedClusterAddOn) (bool, addonapiv1alpha1.ConfigGroupResource) {
+	supportedConfigSet := map[addonapiv1alpha1.ConfigGroupResource]bool{}
+	for _, config := range addon.Status.SupportedConfigs {
+		supportedConfigSet[config] = true
+	}
+
+	for _, config := range addon.Spec.Configs {
+		if _, ok := supportedConfigSet[config.ConfigGroupResource]; !ok {
+			return false, config.ConfigGroupResource
+		}
+	}
+
+	return true, addonapiv1alpha1.ConfigGroupResource{}
+}
+
+func workConfigsMatchesAddon(work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) bool {
+	// get work spec hash
+	if _, ok := work.Annotations[workapiv1.ManifestConfigSpecHashAnnotationKey]; !ok {
+		return len(addon.Status.ConfigReferences) == 0
+	}
+
+	// parse work spec hash
+	workSpecHashMap := make(map[string]string)
+	if err := json.Unmarshal([]byte(work.Annotations[workapiv1.ManifestConfigSpecHashAnnotationKey]), &workSpecHashMap); err != nil {
+		klog.Warningf("%v", err)
+		return false
+	}
+
+	// check work spec hash; every config must carry a non-empty spec hash
+	for _, v := range workSpecHashMap {
+		if v == "" {
+			return false
+		}
+	}
+
+	// check addon desired config
+	for _, configReference := range addon.Status.ConfigReferences {
+		if configReference.DesiredConfig == nil || configReference.DesiredConfig.SpecHash == "" {
+			return false
+		}
+	}
+	addonSpecHashMap := agentdeploy.ConfigsToMap(addon.Status.ConfigReferences)
+
+	// compare work and addon configs
+	return equality.Semantic.DeepEqual(workSpecHashMap, addonSpecHashMap)
+}
+
+// work is ready when
+// 1) the condition Available status is true.
+// 2) the condition Available observedGeneration equals the work's generation.
+// 3) since one addon can have multiple ManifestWorks, on a fresh install the
+//    ManifestWork condition Applied must also be true.
+func workIsReady(work *workapiv1.ManifestWork) bool {
+	cond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable)
+	if cond == nil || cond.Status != metav1.ConditionTrue || cond.ObservedGeneration != work.Generation {
+		return false
+	}
+	cond = meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkApplied)
+	if cond == nil || cond.Status != metav1.ConditionTrue || cond.ObservedGeneration != work.Generation {
+		return false
+	}
+
+	return true
+}
+
+// setAddOnProgressingAndLastApplied updates the Progressing condition and, on
+// success, copies each DesiredConfig into LastAppliedConfig.
+func setAddOnProgressingAndLastApplied(isUpgrade bool, status string, message string, addon *addonapiv1alpha1.ManagedClusterAddOn) {
+	// Always update the Progressing condition when there is no config; skip the
+	// update when every last applied config already matches the desired config.
+	skip := len(addon.Status.ConfigReferences) > 0
+	for _, configReference := range addon.Status.ConfigReferences {
+		if !equality.Semantic.DeepEqual(configReference.LastAppliedConfig, configReference.DesiredConfig) {
+			skip = false
+		}
+	}
+	if skip {
+		return
+	}
+
+	condition := metav1.Condition{
+		Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
+	}
+	switch status {
+	case ProgressingDoing:
+		condition.Status = metav1.ConditionTrue
+		if isUpgrade {
+			condition.Reason = addonapiv1alpha1.ProgressingReasonUpgrading
+			condition.Message = fmt.Sprintf("upgrading... %v", message)
+		} else {
+			condition.Reason = addonapiv1alpha1.ProgressingReasonInstalling
+			condition.Message = fmt.Sprintf("installing... %v", message)
+		}
+	case ProgressingSucceed:
+		// Progressing turns False on success; the outcome is carried in the reason.
+		condition.Status = metav1.ConditionFalse
+		for i, configReference := range addon.Status.ConfigReferences {
+			addon.Status.ConfigReferences[i].LastAppliedConfig = configReference.DesiredConfig.DeepCopy()
+		}
+		if isUpgrade {
+			condition.Reason = addonapiv1alpha1.ProgressingReasonUpgradeSucceed
+			condition.Message = "upgrade completed with no errors."
+		} else {
+			condition.Reason = addonapiv1alpha1.ProgressingReasonInstallSucceed
+			condition.Message = "install completed with no errors."
+ } + case ProgressingFailed: + condition.Status = metav1.ConditionFalse + if isUpgrade { + condition.Reason = addonapiv1alpha1.ProgressingReasonUpgradeFailed + condition.Message = message + } else { + condition.Reason = addonapiv1alpha1.ProgressingReasonInstallFailed + condition.Message = message + } + } + meta.SetStatusCondition(&addon.Status.Conditions, condition) +} diff --git a/pkg/addon/controllers/addonprogressing/controller_test.go b/pkg/addon/controllers/addonprogressing/controller_test.go new file mode 100644 index 000000000..63b12ceb2 --- /dev/null +++ b/pkg/addon/controllers/addonprogressing/controller_test.go @@ -0,0 +1,619 @@ +package addonprogressing + +import ( + "context" + "encoding/json" + "testing" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clienttesting "k8s.io/client-go/testing" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/api/addon/v1alpha1" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + fakework "open-cluster-management.io/api/client/work/clientset/versioned/fake" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + workapiv1 "open-cluster-management.io/api/work/v1" + + testingcommon "open-cluster-management.io/ocm/pkg/common/testing" +) + +func newClusterManagementOwner(name string) metav1.OwnerReference { + clusterManagementAddon := addontesting.NewClusterManagementAddon(name, "testcrd", "testcr").Build() + return *metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) +} + +func TestReconcile(t *testing.T) { + cases := []struct { + name string + syncKey string + managedClusteraddon []runtime.Object + clusterManagementAddon []runtime.Object + work []runtime.Object + validateAddonActions func(t *testing.T, actions []clienttesting.Action) + }{ + { + name: "no clustermanagementaddon", + syncKey: "test/test", + clusterManagementAddon: []runtime.Object{}, + managedClusteraddon: []runtime.Object{}, + work: []runtime.Object{}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "no managedClusteraddon", + syncKey: "test/test", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "no work applied condition", + syncKey: "test/test", + managedClusteraddon: []runtime.Object{ + addontesting.NewAddon("test", "cluster1"), + }, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "update managedclusteraddon to installing when no work", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnManifestApplied, + Status: 
metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, + Message: "manifests of addon are applied successfully", + }) + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + + addOn := &addonapiv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(actual, addOn) + if err != nil { + t.Fatal(err) + } + configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) { + t.Errorf("Condition Progressing is incorrect") + } + }, + }, + { + name: "update managedclusteraddon to installing when work config spec not match", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"}, + SpecHash: "hashnew", + }, + LastAppliedConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"}, + SpecHash: "", + }, + }, + } + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnManifestApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, + Message: "manifests of addon are applied successfully", + }) + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{func() *workapiv1.ManifestWork { + work := addontesting.NewManifestWork( + "addon-test-deploy", + "cluster1", + addontesting.NewUnstructured("v1", "ConfigMap", "default", "test1"), + addontesting.NewUnstructured("v1", "Deployment", "default", "test1"), + ) + work.SetLabels(map[string]string{ + addonapiv1alpha1.AddonLabelKey: "test", + }) + work.SetAnnotations(map[string]string{ + workapiv1.ManifestConfigSpecHashAnnotationKey: "{\"foo.core/open-cluster-management/test\":\"hash\"}", + }) + work.Status.Conditions = []metav1.Condition{ + { + Type: workapiv1.WorkApplied, + Status: metav1.ConditionTrue, + }, + { + Type: workapiv1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + } + return work + }()}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + + addOn := &addonapiv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(actual, addOn) + if err != nil { + t.Fatal(err) + } + configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) { + t.Errorf("Condition Progressing is incorrect") + } + if 
len(addOn.Status.ConfigReferences) != 0 {
+					t.Errorf("ConfigReferences object is not correct: %v", addOn.Status.ConfigReferences)
+				}
+			},
+		},
+		{
+			name:    "update managedclusteraddon to installing when work is not ready",
+			syncKey: "cluster1/test",
+			managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn {
+				addon := addontesting.NewAddon("test", "cluster1")
+				addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{
+					{
+						ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "foo"},
+						DesiredConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"},
+							SpecHash:       "hashnew",
+						},
+						LastAppliedConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"},
+							SpecHash:       "",
+						},
+					},
+				}
+				meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnManifestApplied,
+					Status:  metav1.ConditionTrue,
+					Reason:  addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied,
+					Message: "manifests of addon are applied successfully",
+				})
+				return addon
+			}()},
+			clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()},
+			work: []runtime.Object{func() *workapiv1.ManifestWork {
+				work := addontesting.NewManifestWork(
+					"addon-test-deploy",
+					"cluster1",
+					addontesting.NewUnstructured("v1", "ConfigMap", "default", "test1"),
+					addontesting.NewUnstructured("v1", "Deployment", "default", "test1"),
+				)
+				work.SetLabels(map[string]string{
+					addonapiv1alpha1.AddonLabelKey: "test",
+				})
+				work.SetAnnotations(map[string]string{
+					workapiv1.ManifestConfigSpecHashAnnotationKey: "{\"foo.core/open-cluster-management/test\":\"hashnew\"}",
+				})
+				work.Status.Conditions = []metav1.Condition{
+					{
+						Type:   workapiv1.WorkApplied,
+						Status: metav1.ConditionFalse,
+					},
+					{
+						Type:   workapiv1.WorkAvailable,
+						Status: metav1.ConditionTrue,
+					},
+				}
+				return work
+			}()},
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "patch")
+				actual := actions[0].(clienttesting.PatchActionImpl).Patch
+
+				addOn := &addonapiv1alpha1.ManagedClusterAddOn{}
+				err := json.Unmarshal(actual, addOn)
+				if err != nil {
+					t.Fatal(err)
+				}
+				configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing)
+				if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) {
+					t.Errorf("Condition Progressing is incorrect")
+				}
+				if len(addOn.Status.ConfigReferences) != 0 {
+					t.Errorf("ConfigReferences object is not correct: %v", addOn.Status.ConfigReferences)
+				}
+			},
+		},
+		{
+			name:    "update managedclusteraddon to upgrading when work config spec not match",
+			syncKey: "cluster1/test",
+			managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn {
+				addon := addontesting.NewAddon("test", "cluster1")
+				addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{
+					{
+						ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "foo"},
+						DesiredConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"},
+							SpecHash:       "hashnew",
+						},
+						LastAppliedConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"},
+							SpecHash:       "hash",
+						},
+					},
+				}
+				meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnManifestApplied,
+					Status:  metav1.ConditionTrue,
+					Reason:  addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied,
+					Message: "manifests of addon are applied successfully",
+				})
+				return addon
+			}()},
+			clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()},
+			work: []runtime.Object{func() *workapiv1.ManifestWork {
+				work := addontesting.NewManifestWork(
+					"addon-test-deploy",
+					"cluster1",
+					addontesting.NewUnstructured("v1", "ConfigMap", "default", "test1"),
+					addontesting.NewUnstructured("v1", "Deployment", "default", "test1"),
+				)
+				work.SetLabels(map[string]string{
+					addonapiv1alpha1.AddonLabelKey: "test",
+				})
+				work.SetAnnotations(map[string]string{
+					workapiv1.ManifestConfigSpecHashAnnotationKey: "{\"foo.core/open-cluster-management/test\":\"hash\"}",
+				})
+				work.Status.Conditions = []metav1.Condition{
+					{
+						Type:   workapiv1.WorkApplied,
+						Status: metav1.ConditionTrue,
+					},
+					{
+						Type:   workapiv1.WorkAvailable,
+						Status: metav1.ConditionTrue,
+					},
+				}
+				return work
+			}()},
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "patch")
+				actual := actions[0].(clienttesting.PatchActionImpl).Patch
+
+				addOn := &addonapiv1alpha1.ManagedClusterAddOn{}
+				err := json.Unmarshal(actual, addOn)
+				if err != nil {
+					t.Fatal(err)
+				}
+				configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing)
+				if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgrading && configCond.Status == metav1.ConditionTrue) {
+					t.Errorf("Condition Progressing is incorrect")
+				}
+				if len(addOn.Status.ConfigReferences) != 0 {
+					t.Errorf("ConfigReferences object is not correct: %v", addOn.Status.ConfigReferences)
+				}
+			},
+		},
+		{
+			name:    "update managedclusteraddon to upgrading when work is not ready",
+			syncKey: "cluster1/test",
+			managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn {
+				addon := addontesting.NewAddon("test", "cluster1")
+				addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{
+					{
+						ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "foo"},
+						DesiredConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"},
+							SpecHash:       "hashnew",
+						},
+						LastAppliedConfig: &v1alpha1.ConfigSpecHash{
+							ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"},
+							SpecHash:       "hash",
+						},
+					},
+				}
+				meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnManifestApplied,
+					Status:  metav1.ConditionTrue,
+					Reason:  addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied,
+					Message: "manifests of addon are applied successfully",
+				})
+				return addon
+			}()},
+			clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()},
+			work: []runtime.Object{func() *workapiv1.ManifestWork {
+				work := addontesting.NewManifestWork(
+					"addon-test-deploy",
+					"cluster1",
+					addontesting.NewUnstructured("v1", "ConfigMap", "default", "test1"),
+					addontesting.NewUnstructured("v1", "Deployment", "default", "test1"),
+				)
+				work.SetLabels(map[string]string{
addonapiv1alpha1.AddonLabelKey: "test", + }) + work.SetAnnotations(map[string]string{ + workapiv1.ManifestConfigSpecHashAnnotationKey: "{\"foo.core/open-cluster-management/test\":\"hashnew\"}", + }) + work.Status.Conditions = []metav1.Condition{ + { + Type: workapiv1.WorkApplied, + Status: metav1.ConditionTrue, + }, + { + Type: workapiv1.WorkAvailable, + Status: metav1.ConditionFalse, + }, + } + return work + }()}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + + addOn := &addonapiv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(actual, addOn) + if err != nil { + t.Fatal(err) + } + configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgrading && configCond.Status == metav1.ConditionTrue) { + t.Errorf("Condition Progressing is incorrect") + } + if len(addOn.Status.ConfigReferences) != 0 { + t.Errorf("ConfigReferences object is not correct: %v", addOn.Status.ConfigReferences) + } + }, + }, + { + name: "update managedclusteraddon to install succeed", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"}, + SpecHash: "hashnew", + }, + LastAppliedConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"}, + SpecHash: "", + }, + }, + } + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnManifestApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, + Message: "manifests of addon are applied successfully", + }) + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{func() *workapiv1.ManifestWork { + work := addontesting.NewManifestWork( + "addon-test-deploy", + "cluster1", + addontesting.NewUnstructured("v1", "ConfigMap", "default", "test1"), + addontesting.NewUnstructured("v1", "Deployment", "default", "test1"), + ) + work.SetLabels(map[string]string{ + addonapiv1alpha1.AddonLabelKey: "test", + }) + work.SetAnnotations(map[string]string{ + workapiv1.ManifestConfigSpecHashAnnotationKey: "{\"foo.core/open-cluster-management/test\":\"hashnew\"}", + }) + work.Status.Conditions = []metav1.Condition{ + { + Type: workapiv1.WorkApplied, + Status: metav1.ConditionTrue, + }, + { + Type: workapiv1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + } + return work + }()}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + + addOn := &addonapiv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(actual, addOn) + if err != nil { + t.Fatal(err) + } + configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) + if 
!(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstallSucceed && configCond.Status == metav1.ConditionFalse) { + t.Errorf("Condition Progressing is incorrect") + } + if len(addOn.Status.ConfigReferences) != 1 { + t.Errorf("ConfigReferences object is not correct: %v", addOn.Status.ConfigReferences) + } + if addOn.Status.ConfigReferences[0].LastAppliedConfig.SpecHash != addOn.Status.ConfigReferences[0].DesiredConfig.SpecHash { + t.Errorf("LastAppliedConfig object is not correct: %v", addOn.Status.ConfigReferences[0].LastAppliedConfig.SpecHash) + } + }, + }, + { + name: "update managedclusteraddon to upgrade succeed", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{func() *addonapiv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{ + { + ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "foo"}, + DesiredConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"}, + SpecHash: "hashnew", + }, + LastAppliedConfig: &v1alpha1.ConfigSpecHash{ + ConfigReferent: v1alpha1.ConfigReferent{Name: "test", Namespace: "open-cluster-management"}, + SpecHash: "hash", + }, + }, + } + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnManifestApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, + Message: "manifests of addon are applied successfully", + }) + return addon + }()}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{func() *workapiv1.ManifestWork { + work := addontesting.NewManifestWork( + "addon-test-deploy", + "cluster1", + addontesting.NewUnstructured("v1", "ConfigMap", "default", "test1"), + addontesting.NewUnstructured("v1", "Deployment", "default", "test1"), + ) + work.SetLabels(map[string]string{ + addonapiv1alpha1.AddonLabelKey: "test", + }) + work.SetAnnotations(map[string]string{ + workapiv1.ManifestConfigSpecHashAnnotationKey: "{\"foo.core/open-cluster-management/test\":\"hashnew\"}", + }) + work.Status.Conditions = []metav1.Condition{ + { + Type: workapiv1.WorkApplied, + Status: metav1.ConditionTrue, + }, + { + Type: workapiv1.WorkAvailable, + Status: metav1.ConditionTrue, + }, + } + return work + }()}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + + addOn := &addonapiv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(actual, addOn) + if err != nil { + t.Fatal(err) + } + configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgradeSucceed && configCond.Status == metav1.ConditionFalse) { + t.Errorf("Condition Progressing is incorrect") + } + if len(addOn.Status.ConfigReferences) != 1 { + t.Errorf("ConfigReferences object is not correct: %v", addOn.Status.ConfigReferences) + } + if addOn.Status.ConfigReferences[0].LastAppliedConfig.SpecHash != addOn.Status.ConfigReferences[0].DesiredConfig.SpecHash { + t.Errorf("LastAppliedConfig object is not correct: %v", addOn.Status.ConfigReferences[0].LastAppliedConfig.SpecHash) + } + }, + }, + { + name: 
"update managedclusteraddon to configuration unsupported...", + syncKey: "cluster1/test", + managedClusteraddon: []runtime.Object{ + func() *addonapiv1alpha1.ManagedClusterAddOn { + addon := addontesting.NewAddon("test", "cluster1") + addon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "config1.test", + Resource: "config1", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: "cluster1", + Name: "override", + }, + }, + } + addon.Status.SupportedConfigs = []addonapiv1alpha1.ConfigGroupResource{ + { + Group: "configs.test", + Resource: "testconfigs", + }, + } + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnManifestApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, + Message: "manifests of addon are applied successfully", + }) + return addon + }(), + }, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + work: []runtime.Object{}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + patch := actions[0].(clienttesting.PatchAction).GetPatch() + addOn := &addonapiv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(patch, addOn) + if err != nil { + t.Fatal(err) + } + + configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonConfigurationUnsupported && configCond.Status == metav1.ConditionFalse) { + t.Errorf("Condition Progressing is incorrect") + } + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fakeAddonClient := fakeaddon.NewSimpleClientset(c.managedClusteraddon...) 
+			fakeWorkClient := fakework.NewSimpleClientset()
+
+			addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute)
+			workInformers := workinformers.NewSharedInformerFactory(fakeWorkClient, 10*time.Minute)
+
+			for _, obj := range c.managedClusteraddon {
+				if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+			for _, obj := range c.clusterManagementAddon {
+				if err := addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+			for _, obj := range c.work {
+				if err := workInformers.Work().V1().ManifestWorks().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey)
+			recorder := syncContext.Recorder()
+
+			controller := NewAddonProgressingController(
+				fakeAddonClient,
+				addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+				addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+				workInformers.Work().V1().ManifestWorks(),
+				utils.ManagedBySelf(map[string]agent.AgentAddon{"test": nil}),
+				recorder,
+			)
+
+			err := controller.Sync(context.TODO(), syncContext)
+			if err != nil {
+				t.Errorf("expected no error when sync: %v", err)
+			}
+			c.validateAddonActions(t, fakeAddonClient.Actions())
+		})
+	}
+}
diff --git a/pkg/addon/controllers/addontemplate/controller.go b/pkg/addon/controllers/addontemplate/controller.go
new file mode 100644
index 000000000..f43e3f20a
--- /dev/null
+++ b/pkg/addon/controllers/addontemplate/controller.go
@@ -0,0 +1,211 @@
+package addontemplate
+
+import (
+	"context"
+	"time"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilrand "k8s.io/apimachinery/pkg/util/rand"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/dynamic/dynamicinformer"
+	kubeinformers "k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+
+	"open-cluster-management.io/addon-framework/pkg/addonfactory"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager"
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+	workv1informers "open-cluster-management.io/api/client/work/informers/externalversions"
+
+	"open-cluster-management.io/ocm/pkg/addon/templateagent"
+)
+
+// addonTemplateController watches ClusterManagementAddOns on the hub to find all the addon templates
+// in use, and starts an addon manager for each template type addon to handle the agent deployments
+// that use its template
+type addonTemplateController struct {
+	// addonManagers holds all addon managers that will be deployed with template type addons.
+	// The key is the name of the template type addon.
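+	//
+	// An illustrative sketch (not part of this change) of an entry's lifecycle,
+	// see startManager and stopUnusedManagers below:
+	//
+	//	ctx, cancel := context.WithCancel(parentCtx)
+	//	c.addonManagers[addonName] = cancel // manager started
+	//	...
+	//	c.addonManagers[addonName]()        // manager stopped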
+ addonManagers map[string]context.CancelFunc + + kubeConfig *rest.Config + addonClient addonv1alpha1client.Interface + kubeClient kubernetes.Interface + cmaLister addonlisterv1alpha1.ClusterManagementAddOnLister + addonInformers addoninformers.SharedInformerFactory + clusterInformers clusterv1informers.SharedInformerFactory + dynamicInformers dynamicinformer.DynamicSharedInformerFactory + workInformers workv1informers.SharedInformerFactory + runControllerFunc runController +} + +type runController func(ctx context.Context, addonName string) error + +// NewAddonTemplateController returns an instance of addonTemplateController +func NewAddonTemplateController( + hubKubeconfig *rest.Config, + hubKubeClient kubernetes.Interface, + addonClient addonv1alpha1client.Interface, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory, + workInformers workv1informers.SharedInformerFactory, + recorder events.Recorder, + runController ...runController, +) factory.Controller { + c := &addonTemplateController{ + kubeConfig: hubKubeconfig, + kubeClient: hubKubeClient, + addonClient: addonClient, + cmaLister: addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Lister(), + addonManagers: make(map[string]context.CancelFunc), + addonInformers: addonInformers, + clusterInformers: clusterInformers, + dynamicInformers: dynamicInformers, + workInformers: workInformers, + } + + if len(runController) > 0 { + c.runControllerFunc = runController[0] + } else { + // easy to mock in unit tests + c.runControllerFunc = c.runController + } + return factory.New().WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer()). + WithSync(c.sync). 
+		ToController("addon-template-controller", recorder)
+}
+
+func (c *addonTemplateController) stopUnusedManagers(
+	ctx context.Context, syncCtx factory.SyncContext, addOnName string) {
+
+	stopFunc, ok := c.addonManagers[addOnName]
+	if ok {
+		stopFunc()
+		// remove the entry so a manager can be started again if the addon becomes template type later
+		delete(c.addonManagers, addOnName)
+		klog.Infof("Stop the manager for addon %s", addOnName)
+	}
+}
+
+func (c *addonTemplateController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+	key := syncCtx.QueueKey()
+	_, addonName, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		// ignore any addon whose key is not in the format: namespace/name or name
+		return nil
+	}
+
+	cma, err := c.cmaLister.Get(addonName)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	if !templateagent.SupportAddOnTemplate(cma) {
+		c.stopUnusedManagers(ctx, syncCtx, cma.Name)
+		return nil
+	}
+
+	_, exist := c.addonManagers[addonName]
+	if exist {
+		klog.Infof("There is already a manager started for addon %s, skip.", addonName)
+		return nil
+	}
+
+	klog.Infof("Start an addon manager for addon %s", addonName)
+
+	stopFunc := c.startManager(ctx, addonName)
+	c.addonManagers[addonName] = stopFunc
+	return nil
+}
+
+func (c *addonTemplateController) startManager(
+	pctx context.Context,
+	addonName string) context.CancelFunc {
+	ctx, stopFunc := context.WithCancel(pctx)
+	go func() {
+		err := c.runControllerFunc(ctx, addonName)
+		if err != nil {
+			klog.Errorf("run controller for addon %s error: %v", addonName, err)
+			utilruntime.HandleError(err)
+		}
+
+		// use the parent context to start all shared informers, otherwise once the child context is
+		// cancelled, the informers would stop and all the other shared goroutines would be impacted.
+		c.workInformers.Start(pctx.Done())
+		c.addonInformers.Start(pctx.Done())
+		c.clusterInformers.Start(pctx.Done())
+		c.dynamicInformers.Start(pctx.Done())
+
+		<-ctx.Done()
+		klog.Infof("Addon %s Manager stopped", addonName)
+	}()
+	return stopFunc
+}
+
+func (c *addonTemplateController) runController(
+	ctx context.Context, addonName string) error {
+	mgr, err := addonmanager.New(c.kubeConfig)
+	if err != nil {
+		return err
+	}
+
+	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(c.kubeClient, 10*time.Minute,
+		kubeinformers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
+			selector := &metav1.LabelSelector{
+				MatchExpressions: []metav1.LabelSelectorRequirement{
+					{
+						Key:      addonv1alpha1.AddonLabelKey,
+						Operator: metav1.LabelSelectorOpIn,
+						Values:   []string{addonName},
+					},
+				},
+			}
+			listOptions.LabelSelector = metav1.FormatLabelSelector(selector)
+		}),
+	)
+
+	agentAddon := templateagent.NewCRDTemplateAgentAddon(
+		addonName,
+		// TODO: agentName should not be changed after restarting the agent
+		utilrand.String(5),
+		c.kubeClient,
+		c.addonClient,
+		c.addonInformers,
+		kubeInformers.Rbac().V1().RoleBindings().Lister(),
+		addonfactory.GetAddOnDeploymentConfigValues(
+			addonfactory.NewAddOnDeploymentConfigGetter(c.addonClient),
+			addonfactory.ToAddOnCustomizedVariableValues,
+			templateagent.ToAddOnNodePlacementPrivateValues,
+			templateagent.ToAddOnRegistriesPrivateValues,
+		),
+	)
+	err = mgr.AddAgent(agentAddon)
+	if err != nil {
+		return err
+	}
+
+	err = mgr.StartWithInformers(ctx, kubeInformers, c.workInformers, c.addonInformers, c.clusterInformers, c.dynamicInformers)
+	if err != nil {
+		return err
+	}
+
+	kubeInformers.Start(ctx.Done())
+
+	return nil
+}
diff --git a/pkg/addon/controllers/addontemplate/controller_test.go b/pkg/addon/controllers/addontemplate/controller_test.go
new file mode 100644
index 000000000..46571195e --- /dev/null +++ b/pkg/addon/controllers/addontemplate/controller_test.go @@ -0,0 +1,217 @@ +package addontemplate + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic/dynamicinformer" + dynamicfake "k8s.io/client-go/dynamic/fake" + fakekube "k8s.io/client-go/kubernetes/fake" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" + "open-cluster-management.io/addon-framework/pkg/utils" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake" + clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + fakework "open-cluster-management.io/api/client/work/clientset/versioned/fake" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + + testingcommon "open-cluster-management.io/ocm/pkg/common/testing" +) + +func TestReconcile(t *testing.T) { + cases := []struct { + name string + syncKeys []string + managedClusteraddon []runtime.Object + clusterManagementAddon []runtime.Object + expectedCount int + expectedTimeout bool + }{ + { + name: "no clustermanagementaddon", + syncKeys: []string{"test"}, + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{}, + expectedCount: 0, + expectedTimeout: true, + }, + { + name: "not template type clustermanagementaddon", + syncKeys: []string{"test"}, + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{ + addontesting.NewClusterManagementAddon("test", "", "").Build()}, + expectedCount: 0, + expectedTimeout: true, + }, + { + name: "one template type clustermanagementaddon", + syncKeys: []string{"test"}, + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{ + addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs( + addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ + Group: utils.AddOnTemplateGVR.Group, + Resource: utils.AddOnTemplateGVR.Resource, + }, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).Build()}, + expectedCount: 1, + expectedTimeout: false, + }, + { + name: "two template type clustermanagementaddon", + syncKeys: []string{"test", "test1"}, + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{ + addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs( + addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ + Group: utils.AddOnTemplateGVR.Group, + Resource: utils.AddOnTemplateGVR.Resource, + }, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).Build(), + addontesting.NewClusterManagementAddon("test1", "", "").WithSupportedConfigs( + addonv1alpha1.ConfigMeta{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{ + Group: utils.AddOnTemplateGVR.Group, + Resource: utils.AddOnTemplateGVR.Resource, + }, + DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, + }).Build(), + }, + expectedCount: 2, + expectedTimeout: false, + }, + { + name: "two template type and one not template type clustermanagementaddon", + syncKeys: []string{"test", "test1", "test2"}, + 
managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: []runtime.Object{
+				addontesting.NewClusterManagementAddon("test", "", "").WithSupportedConfigs(
+					addonv1alpha1.ConfigMeta{
+						ConfigGroupResource: addonv1alpha1.ConfigGroupResource{
+							Group:    utils.AddOnTemplateGVR.Group,
+							Resource: utils.AddOnTemplateGVR.Resource,
+						},
+						DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"},
+					}).Build(),
+				addontesting.NewClusterManagementAddon("test1", "", "").WithSupportedConfigs(
+					addonv1alpha1.ConfigMeta{
+						ConfigGroupResource: addonv1alpha1.ConfigGroupResource{
+							Group:    utils.AddOnTemplateGVR.Group,
+							Resource: utils.AddOnTemplateGVR.Resource,
+						},
+						DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"},
+					}).Build(),
+				addontesting.NewClusterManagementAddon("test2", "", "").Build(),
+			},
+			expectedCount:   2,
+			expectedTimeout: true,
+		},
+	}
+
+	for _, c := range cases {
+		count := 0
+		var wg sync.WaitGroup
+		lock := &sync.Mutex{}
+		readCount := func() int {
+			lock.Lock()
+			defer lock.Unlock()
+			return count
+		}
+		increaseCount := func() {
+			lock.Lock()
+			defer lock.Unlock()
+			count++
+		}
+
+		for range c.syncKeys {
+			wg.Add(1)
+		}
+		runController := func(ctx context.Context, addonName string) error {
+			defer wg.Done()
+			increaseCount()
+			return nil
+		}
+		obj := append(c.clusterManagementAddon, c.managedClusteraddon...)
+		fakeAddonClient := fakeaddon.NewSimpleClientset(obj...)
+
+		addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute)
+
+		for _, obj := range c.managedClusteraddon {
+			if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil {
+				t.Fatal(err)
+			}
+		}
+		for _, obj := range c.clusterManagementAddon {
+			if err := addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().GetStore().Add(obj); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme())
+		dynamicInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(fakeDynamicClient, 0)
+
+		fakeClusterClient := fakecluster.NewSimpleClientset()
+		clusterInformers := clusterv1informers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute)
+
+		fakeWorkClient := fakework.NewSimpleClientset()
+		workInformers := workinformers.NewSharedInformerFactory(fakeWorkClient, 10*time.Minute)
+
+		hubKubeClient := fakekube.NewSimpleClientset()
+
+		controller := NewAddonTemplateController(
+			nil,
+			hubKubeClient,
+			fakeAddonClient,
+			addonInformers,
+			clusterInformers,
+			dynamicInformerFactory,
+			workInformers,
+			eventstesting.NewTestingEventRecorder(t),
+			runController,
+		)
+		ctx := context.TODO()
+		for _, syncKey := range c.syncKeys {
+			syncContext := testingcommon.NewFakeSyncContext(t, syncKey)
+			err := controller.Sync(ctx, syncContext)
+			if err != nil {
+				t.Errorf("expected no error when sync: %v", err)
+			}
+		}
+
+		ch := make(chan struct{})
+		go func() {
+			defer close(ch)
+			wg.Wait()
+		}()
+
+		select {
+		case <-ch:
+			actualCount := readCount()
+			if actualCount != c.expectedCount {
+				t.Errorf("name: %s, expected runControllerFunc to be called %d times, but was called %d times",
+					c.name, c.expectedCount, actualCount)
+			}
+		case <-time.After(1 * time.Second):
+			if !c.expectedTimeout {
+				t.Errorf("name: %s, expected no timeout, but timed out", c.name)
+			}
+			actualCount := readCount()
+			if actualCount != c.expectedCount {
+				t.Errorf("name: %s, expected runControllerFunc to be called %d times, but was called %d times",
+					c.name, c.expectedCount, actualCount)
+			}
+		}
+	}
+}
diff --git a/pkg/addon/controllers/managementaddoninstallprogression/controller.go b/pkg/addon/controllers/managementaddoninstallprogression/controller.go
new file mode 100644
index 000000000..7b5a56e5a
--- /dev/null
+++ b/pkg/addon/controllers/managementaddoninstallprogression/controller.go
@@ -0,0 +1,255 @@
+package managementaddoninstallprogression
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+)
+
+// managementAddonInstallProgressionController reconciles instances of ClusterManagementAddOn on the hub
+// to keep the default config references and install progressions in their status up to date.
+type managementAddonInstallProgressionController struct {
+	addonClient                  addonv1alpha1client.Interface
+	managedClusterAddonLister    addonlisterv1alpha1.ManagedClusterAddOnLister
+	clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister
+	addonFilterFunc              factory.EventFilterFunc
+}
+
+func NewManagementAddonInstallProgressionController(
+	addonClient addonv1alpha1client.Interface,
+	addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer,
+	clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer,
+	addonFilterFunc factory.EventFilterFunc,
+	recorder events.Recorder,
+) factory.Controller {
+	c := &managementAddonInstallProgressionController{
+		addonClient:                  addonClient,
+		managedClusterAddonLister:    addonInformers.Lister(),
+		clusterManagementAddonLister: clusterManagementAddonInformers.Lister(),
+		addonFilterFunc:              addonFilterFunc,
+	}
+
+	return factory.New().WithInformersQueueKeysFunc(
+		func(obj runtime.Object) []string {
+			accessor, _ := meta.Accessor(obj)
+			return []string{accessor.GetName()}
+		},
+		addonInformers.Informer(), clusterManagementAddonInformers.Informer()).
+		WithSync(c.sync).ToController("management-addon-status-controller", recorder)
+}
+
+func (c *managementAddonInstallProgressionController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+	addonName := syncCtx.QueueKey()
+	klog.V(4).Infof("Reconciling addon %q", addonName)
+
+	mgmtAddon, err := c.clusterManagementAddonLister.Get(addonName)
+	switch {
+	case errors.IsNotFound(err):
+		return nil
+	case err != nil:
+		return err
+	}
+
+	mgmtAddonCopy := mgmtAddon.DeepCopy()
+
+	// set default config reference
+	mgmtAddonCopy.Status.DefaultConfigReferences = setDefaultConfigReference(mgmtAddonCopy.Spec.SupportedConfigs, mgmtAddonCopy.Status.DefaultConfigReferences)
+
+	// clear the install progressions when the install strategy type is Manual
+	if mgmtAddonCopy.Spec.InstallStrategy.Type == "" || mgmtAddonCopy.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual {
+		mgmtAddonCopy.Status.InstallProgressions = []addonv1alpha1.InstallProgression{}
+		return c.patchMgmtAddonStatus(ctx, mgmtAddonCopy, mgmtAddon)
+	}
+
+	// only update default config references and skip updating install progression for self-managed addon
+	if !c.addonFilterFunc(mgmtAddon) {
+		return c.patchMgmtAddonStatus(ctx, mgmtAddonCopy, mgmtAddon)
+	}
+
+	// set install progression
+	mgmtAddonCopy.Status.InstallProgressions = setInstallProgression(mgmtAddonCopy.Spec.SupportedConfigs,
+		mgmtAddonCopy.Spec.InstallStrategy.Placements, mgmtAddonCopy.Status.InstallProgressions)
+
+	// update cma status
+	return c.patchMgmtAddonStatus(ctx, mgmtAddonCopy, mgmtAddon)
+}
+
+func (c *managementAddonInstallProgressionController) patchMgmtAddonStatus(ctx context.Context, new, old *addonv1alpha1.ClusterManagementAddOn) error {
+	if equality.Semantic.DeepEqual(new.Status, old.Status) {
+		return nil
+	}
+
+	oldData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{
+		Status: addonv1alpha1.ClusterManagementAddOnStatus{
+			DefaultConfigReferences: old.Status.DefaultConfigReferences,
+			InstallProgressions:     old.Status.InstallProgressions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	newData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:             new.UID,
+			ResourceVersion: new.ResourceVersion,
+		},
+		Status: addonv1alpha1.ClusterManagementAddOnStatus{
+			DefaultConfigReferences: new.Status.DefaultConfigReferences,
+			InstallProgressions:     new.Status.InstallProgressions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
+	if err != nil {
+		return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err)
+	}
+
+	klog.V(2).Infof("Patching clustermanagementaddon %s status with %s", new.Name, string(patchBytes))
+	_, err = c.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch(
+		ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
+
+func setDefaultConfigReference(supportedConfigs []addonv1alpha1.ConfigMeta,
+	existDefaultConfigReferences []addonv1alpha1.DefaultConfigReference) []addonv1alpha1.DefaultConfigReference {
+	newDefaultConfigReferences := []addonv1alpha1.DefaultConfigReference{}
+	for _, config := range supportedConfigs {
+		if config.DefaultConfig == nil {
+			continue
+		}
+		configRef := addonv1alpha1.DefaultConfigReference{
+			ConfigGroupResource: config.ConfigGroupResource,
DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: *config.DefaultConfig, + }, + } + // if the config already exists in status, keep the existing spec hash + if existConfigRef, exist := findDefaultConfigReference(&configRef, existDefaultConfigReferences); exist { + configRef.DesiredConfig.SpecHash = existConfigRef.DesiredConfig.SpecHash + } + newDefaultConfigReferences = append(newDefaultConfigReferences, configRef) + } + return newDefaultConfigReferences +} + +func findDefaultConfigReference( + newobj *addonv1alpha1.DefaultConfigReference, + oldobjs []addonv1alpha1.DefaultConfigReference, +) (*addonv1alpha1.DefaultConfigReference, bool) { + for _, oldconfig := range oldobjs { + if oldconfig.ConfigGroupResource == newobj.ConfigGroupResource && oldconfig.DesiredConfig.ConfigReferent == newobj.DesiredConfig.ConfigReferent { + return &oldconfig, true + } + } + return nil, false +} + +func setInstallProgression(supportedConfigs []addonv1alpha1.ConfigMeta, placementStrategies []addonv1alpha1.PlacementStrategy, + existInstallProgressions []addonv1alpha1.InstallProgression) []addonv1alpha1.InstallProgression { + newInstallProgressions := []addonv1alpha1.InstallProgression{} + for _, placementStrategy := range placementStrategies { + // set placement ref + installProgression := addonv1alpha1.InstallProgression{ + PlacementRef: placementStrategy.PlacementRef, + } + + // set config references as default configuration + installConfigReferences := []addonv1alpha1.InstallConfigReference{} + installConfigReferencesMap := map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReferent{} + for _, config := range supportedConfigs { + if config.DefaultConfig != nil { + installConfigReferencesMap[config.ConfigGroupResource] = *config.DefaultConfig + } + } + + // override the default configuration for each placement + for _, config := range placementStrategy.Configs { + installConfigReferencesMap[config.ConfigGroupResource] = config.ConfigReferent + } + + // set the config references for each install progression + for k, v := range installConfigReferencesMap { + installConfigReferences = append(installConfigReferences, + addonv1alpha1.InstallConfigReference{ + ConfigGroupResource: k, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: v, + }, + }, + ) + } + installProgression.ConfigReferences = installConfigReferences + + // if the config group resource already exists in status, merge the install progression + if existInstallProgression, exist := findInstallProgression(&installProgression, existInstallProgressions); exist { + mergeInstallProgression(&installProgression, existInstallProgression) + } + newInstallProgressions = append(newInstallProgressions, installProgression) + } + return newInstallProgressions +} + +func findInstallProgression(newobj *addonv1alpha1.InstallProgression, oldobjs []addonv1alpha1.InstallProgression) (*addonv1alpha1.InstallProgression, bool) { + for _, oldobj := range oldobjs { + if oldobj.PlacementRef == newobj.PlacementRef { + count := 0 + for _, oldconfig := range oldobj.ConfigReferences { + for _, newconfig := range newobj.ConfigReferences { + if oldconfig.ConfigGroupResource == newconfig.ConfigGroupResource { + count += 1 + } + } + } + if count == len(newobj.ConfigReferences) { + return &oldobj, true + } + } + } + return nil, false +} + +func mergeInstallProgression(newobj, oldobj *addonv1alpha1.InstallProgression) { + // merge config reference + for i := range newobj.ConfigReferences { + for _, oldconfig := range oldobj.ConfigReferences { + if 
newobj.ConfigReferences[i].ConfigGroupResource == oldconfig.ConfigGroupResource { + if newobj.ConfigReferences[i].DesiredConfig.ConfigReferent == oldconfig.DesiredConfig.ConfigReferent { + newobj.ConfigReferences[i].DesiredConfig.SpecHash = oldconfig.DesiredConfig.SpecHash + } + newobj.ConfigReferences[i].LastAppliedConfig = oldconfig.LastAppliedConfig.DeepCopy() + newobj.ConfigReferences[i].LastKnownGoodConfig = oldconfig.LastKnownGoodConfig.DeepCopy() + } + } + } + newobj.Conditions = oldobj.Conditions +} diff --git a/pkg/addon/controllers/managementaddoninstallprogression/controller_test.go b/pkg/addon/controllers/managementaddoninstallprogression/controller_test.go new file mode 100644 index 000000000..2d4825fcc --- /dev/null +++ b/pkg/addon/controllers/managementaddoninstallprogression/controller_test.go @@ -0,0 +1,259 @@ +package managementaddoninstallprogression + +import ( + "context" + "encoding/json" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime" + clienttesting "k8s.io/client-go/testing" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/utils" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + + testingcommon "open-cluster-management.io/ocm/pkg/common/testing" +) + +func TestReconcile(t *testing.T) { + cases := []struct { + name string + syncKey string + managedClusteraddon []runtime.Object + clusterManagementAddon []runtime.Object + validateAddonActions func(t *testing.T, actions []clienttesting.Action) + }{ + { + name: "no clustermanagementaddon", + syncKey: "test", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "update clustermanagementaddon status with type manual with no configs", + syncKey: "test", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").Build()}, + validateAddonActions: addontesting.AssertNoActions, + }, + { + name: "update clustermanagementaddon status with type manual with supported configs", + syncKey: "test", + managedClusteraddon: []runtime.Object{}, + clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").WithSupportedConfigs( + addonapiv1alpha1.ConfigMeta{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addonhubconfigs", + }, + DefaultConfig: &addonapiv1alpha1.ConfigReferent{ + Name: "test", + Namespace: "test", + }, + }).Build()}, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch") + actual := actions[0].(clienttesting.PatchActionImpl).Patch + cma := &addonapiv1alpha1.ClusterManagementAddOn{} + err := json.Unmarshal(actual, cma) + if err != nil { + t.Fatal(err) + } + + if len(cma.Status.DefaultConfigReferences) != 1 { + t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) + } + if len(cma.Status.InstallProgressions) != 0 { + t.Errorf("InstallProgressions object is not correct: %v", cma.Status.InstallProgressions) + } + }, + }, + { + name: "update clustermanagementaddon 
status with type manual with invalid supported configs",
+			syncKey:             "test",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").WithSupportedConfigs(
+				addonapiv1alpha1.ConfigMeta{
+					ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+						Group:    "addon.open-cluster-management.io",
+						Resource: "addonhubconfigs",
+					},
+				}).Build()},
+			validateAddonActions: addontesting.AssertNoActions,
+		},
+		{
+			name:                "update clustermanagementaddon status with type placements",
+			syncKey:             "test",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").WithPlacementStrategy(
+				addonapiv1alpha1.PlacementStrategy{
+					PlacementRef: addonapiv1alpha1.PlacementRef{
+						Name:      "placement1",
+						Namespace: "test",
+					},
+				},
+				addonapiv1alpha1.PlacementStrategy{
+					PlacementRef: addonapiv1alpha1.PlacementRef{
+						Name:      "placement2",
+						Namespace: "test",
+					},
+					Configs: []addonapiv1alpha1.AddOnConfig{
+						{
+							ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+								Group:    "addon.open-cluster-management.io",
+								Resource: "addondeploymentconfigs",
+							},
+							ConfigReferent: addonapiv1alpha1.ConfigReferent{
+								Name:      "test",
+								Namespace: "test",
+							},
+						},
+					},
+				},
+			).Build()},
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "patch")
+				actual := actions[0].(clienttesting.PatchActionImpl).Patch
+				cma := &addonapiv1alpha1.ClusterManagementAddOn{}
+				err := json.Unmarshal(actual, cma)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				if len(cma.Status.DefaultConfigReferences) != 0 {
+					t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences)
+				}
+				if len(cma.Status.InstallProgressions) != 2 {
+					t.Errorf("InstallProgressions object is not correct: %v", cma.Status.InstallProgressions)
+				}
+				if len(cma.Status.InstallProgressions[0].ConfigReferences) != 0 {
+					t.Errorf("InstallProgressions ConfigReferences object is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences)
+				}
+				if len(cma.Status.InstallProgressions[1].ConfigReferences) != 1 {
+					t.Errorf("InstallProgressions ConfigReferences object is not correct: %v", cma.Status.InstallProgressions[1].ConfigReferences)
+				}
+			},
+		},
+		{
+			name:                "update clustermanagementaddon status with type placements and default configs",
+			syncKey:             "test",
+			managedClusteraddon: []runtime.Object{},
+			clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "testcrd", "testcr").WithPlacementStrategy(
+				addonapiv1alpha1.PlacementStrategy{
+					PlacementRef: addonapiv1alpha1.PlacementRef{
+						Name:      "placement1",
+						Namespace: "test",
+					},
+					Configs: []addonapiv1alpha1.AddOnConfig{
+						{
+							ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+								Group:    "addon.open-cluster-management.io",
+								Resource: "addonhubconfigs",
+							},
+							ConfigReferent: addonapiv1alpha1.ConfigReferent{
+								Name:      "test1",
+								Namespace: "test",
+							},
+						},
+					},
+				},
+				addonapiv1alpha1.PlacementStrategy{
+					PlacementRef: addonapiv1alpha1.PlacementRef{
+						Name:      "placement2",
+						Namespace: "test",
+					},
+					Configs: []addonapiv1alpha1.AddOnConfig{
+						{
+							ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+								Group:    "addon.open-cluster-management.io",
+								Resource: "addondeploymentconfigs",
+							},
+							ConfigReferent: addonapiv1alpha1.ConfigReferent{
+								Name:      "test",
+								Namespace: "test",
+							},
+						},
+					},
+				},
+			).WithSupportedConfigs(
+				addonapiv1alpha1.ConfigMeta{
+					ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+						Group:    "addon.open-cluster-management.io",
+						Resource: "addonhubconfigs",
+					},
+					DefaultConfig: &addonapiv1alpha1.ConfigReferent{
+						Name:      "test",
+						Namespace: "test",
+					},
+				}).Build()},
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "patch")
+				actual := actions[0].(clienttesting.PatchActionImpl).Patch
+				cma := &addonapiv1alpha1.ClusterManagementAddOn{}
+				err := json.Unmarshal(actual, cma)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				if len(cma.Status.DefaultConfigReferences) != 1 {
+					t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences)
+				}
+				if len(cma.Status.InstallProgressions) != 2 {
+					t.Errorf("InstallProgressions object is not correct: %v", cma.Status.InstallProgressions)
+				}
+				if len(cma.Status.InstallProgressions[0].ConfigReferences) != 1 {
+					t.Errorf("InstallProgressions ConfigReferences object is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences)
+				}
+				if cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig.Name != "test1" {
+					t.Errorf("InstallProgressions ConfigReferences object is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig.Name)
+				}
+				if len(cma.Status.InstallProgressions[1].ConfigReferences) != 2 {
+					t.Errorf("InstallProgressions ConfigReferences object is not correct: %v", cma.Status.InstallProgressions[1].ConfigReferences)
+				}
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			obj := append(c.clusterManagementAddon, c.managedClusteraddon...)
+			fakeAddonClient := fakeaddon.NewSimpleClientset(obj...)
+
+			addonInformers := addoninformers.NewSharedInformerFactory(fakeAddonClient, 10*time.Minute)
+
+			for _, obj := range c.managedClusteraddon {
+				if err := addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+			for _, obj := range c.clusterManagementAddon {
+				if err := addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().GetStore().Add(obj); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey)
+			recorder := syncContext.Recorder()
+
+			controller := NewManagementAddonInstallProgressionController(
+				fakeAddonClient,
+				addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+				addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+				utils.ManagedBySelf(map[string]agent.AgentAddon{"test": nil}),
+				recorder,
+			)
+
+			err := controller.Sync(context.TODO(), syncContext)
+			if err != nil {
+				t.Errorf("expected no error when sync: %v", err)
+			}
+			c.validateAddonActions(t, fakeAddonClient.Actions())
+		})
+	}
+}
diff --git a/pkg/addon/manager.go b/pkg/addon/manager.go
new file mode 100644
index 000000000..a64d69457
--- /dev/null
+++ b/pkg/addon/manager.go
@@ -0,0 +1,191 @@
+package addon
+
+import (
+	"context"
+	"time"
+
+	"github.com/openshift/library-go/pkg/controller/controllercmd"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/dynamic/dynamicinformer"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+
+	"open-cluster-management.io/addon-framework/pkg/index"
+	"open-cluster-management.io/addon-framework/pkg/utils"
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client
"open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" + + "open-cluster-management.io/ocm/pkg/addon/controllers/addonconfiguration" + "open-cluster-management.io/ocm/pkg/addon/controllers/addonmanagement" + "open-cluster-management.io/ocm/pkg/addon/controllers/addonowner" + "open-cluster-management.io/ocm/pkg/addon/controllers/addonprogressing" + "open-cluster-management.io/ocm/pkg/addon/controllers/addontemplate" + "open-cluster-management.io/ocm/pkg/addon/controllers/managementaddoninstallprogression" +) + +func RunManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { + kubeConfig := controllerContext.KubeConfig + hubKubeClient, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + return err + } + + hubClusterClient, err := clusterclientset.NewForConfig(kubeConfig) + if err != nil { + return err + } + + addonClient, err := addonv1alpha1client.NewForConfig(kubeConfig) + if err != nil { + return err + } + + workClient, err := workv1client.NewForConfig(kubeConfig) + if err != nil { + return err + } + + dynamicClient, err := dynamic.NewForConfig(kubeConfig) + if err != nil { + return err + } + + clusterInformerFactory := clusterinformers.NewSharedInformerFactory(hubClusterClient, 30*time.Minute) + addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute) + workInformers := workv1informers.NewSharedInformerFactoryWithOptions(workClient, 10*time.Minute, + workv1informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) { + selector := &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: addonv1alpha1.AddonLabelKey, + Operator: metav1.LabelSelectorOpExists, + }, + }, + } + listOptions.LabelSelector = metav1.FormatLabelSelector(selector) + }), + ) + + // addonDeployController + err = workInformers.Work().V1().ManifestWorks().Informer().AddIndexers( + cache.Indexers{ + index.ManifestWorkByAddon: index.IndexManifestWorkByAddon, + index.ManifestWorkByHostedAddon: index.IndexManifestWorkByHostedAddon, + index.ManifestWorkHookByHostedAddon: index.IndexManifestWorkHookByHostedAddon, + }, + ) + if err != nil { + return err + } + + // addonConfigController + err = addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{index.AddonByConfig: index.IndexAddonByConfig}, + ) + if err != nil { + return err + } + // managementAddonConfigController + err = addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns().Informer().AddIndexers( + cache.Indexers{index.ClusterManagementAddonByConfig: index.IndexClusterManagementAddonByConfig}) + if err != nil { + return err + } + + err = addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ClusterManagementAddonByPlacement: index.IndexClusterManagementAddonByPlacement, + }) + if err != nil { + return err + } + + err = addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ManagedClusterAddonByName: 
index.IndexManagedClusterAddonByName, + }) + if err != nil { + return err + } + + dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 10*time.Minute) + + addonManagementController := addonmanagement.NewAddonManagementController( + addonClient, + addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns(), + clusterInformerFactory.Cluster().V1beta1().Placements(), + clusterInformerFactory.Cluster().V1beta1().PlacementDecisions(), + utils.ManagedByAddonManager, + controllerContext.EventRecorder, + ) + + addonConfigurationController := addonconfiguration.NewAddonConfigurationController( + addonClient, + addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns(), + clusterInformerFactory.Cluster().V1beta1().Placements(), + clusterInformerFactory.Cluster().V1beta1().PlacementDecisions(), + utils.ManagedByAddonManager, + controllerContext.EventRecorder, + ) + + addonOwnerController := addonowner.NewAddonOwnerController( + addonClient, + addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns(), + utils.ManagedByAddonManager, + controllerContext.EventRecorder, + ) + + addonProgressingController := addonprogressing.NewAddonProgressingController( + addonClient, + addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns(), + workInformers.Work().V1().ManifestWorks(), + utils.ManagedByAddonManager, + controllerContext.EventRecorder, + ) + + mgmtAddonInstallProgressionController := managementaddoninstallprogression.NewManagementAddonInstallProgressionController( + addonClient, + addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns(), + utils.ManagedByAddonManager, + controllerContext.EventRecorder, + ) + + addonTemplateController := addontemplate.NewAddonTemplateController( + kubeConfig, + hubKubeClient, + addonClient, + addonInformerFactory, + clusterInformerFactory, + dynamicInformers, + workInformers, + controllerContext.EventRecorder, + ) + + go addonManagementController.Run(ctx, 2) + go addonConfigurationController.Run(ctx, 2) + go addonOwnerController.Run(ctx, 2) + go addonProgressingController.Run(ctx, 2) + go mgmtAddonInstallProgressionController.Run(ctx, 2) + // There should be only one instance of addonTemplateController running, since the addonTemplateController will + // start a goroutine for each template-type addon it watches. 
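+	// (Illustrative sketch, not part of this change: per template-type addon the
+	// controller effectively does
+	//
+	//	go startTemplateAgent(ctx, addonName) // hypothetical helper, long-lived
+	//
+	// so a second controller instance, or extra workers, could double-start these
+	// per-addon goroutines.)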
+	go addonTemplateController.Run(ctx, 1)
+
+	clusterInformerFactory.Start(ctx.Done())
+	addonInformerFactory.Start(ctx.Done())
+	workInformers.Start(ctx.Done())
+	dynamicInformers.Start(ctx.Done())
+
+	<-ctx.Done()
+	return nil
+}
diff --git a/pkg/addon/templateagent/common.go b/pkg/addon/templateagent/common.go
new file mode 100644
index 000000000..d3d5740f7
--- /dev/null
+++ b/pkg/addon/templateagent/common.go
@@ -0,0 +1,49 @@
+package templateagent
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"open-cluster-management.io/addon-framework/pkg/utils"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+)
+
+// AddonTemplateConfigRef returns the first addon template config
+func AddonTemplateConfigRef(
+	configReferences []addonapiv1alpha1.ConfigReference) (bool, addonapiv1alpha1.ConfigReference) {
+	for _, config := range configReferences {
+		if config.Group == utils.AddOnTemplateGVR.Group && config.Resource == utils.AddOnTemplateGVR.Resource {
+			return true, config
+		}
+	}
+	return false, addonapiv1alpha1.ConfigReference{}
+}
+
+// GetTemplateSpecHash returns the sha256 hash of the spec field of the addon template
+func GetTemplateSpecHash(template *addonapiv1alpha1.AddOnTemplate) (string, error) {
+	unstructuredTemplate, err := runtime.DefaultUnstructuredConverter.ToUnstructured(template)
+	if err != nil {
+		return "", err
+	}
+	specHash, err := utils.GetSpecHash(&unstructured.Unstructured{
+		Object: unstructuredTemplate,
+	})
+	if err != nil {
+		return specHash, err
+	}
+	return specHash, nil
+}
+
+// SupportAddOnTemplate returns true if the given ClusterManagementAddOn supports the AddOnTemplate
+func SupportAddOnTemplate(cma *addonapiv1alpha1.ClusterManagementAddOn) bool {
+	if cma == nil {
+		return false
+	}
+
+	for _, config := range cma.Spec.SupportedConfigs {
+		if config.Group == utils.AddOnTemplateGVR.Group && config.Resource == utils.AddOnTemplateGVR.Resource {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/addon/templateagent/decorator.go b/pkg/addon/templateagent/decorator.go
new file mode 100644
index 000000000..ad98c58fe
--- /dev/null
+++ b/pkg/addon/templateagent/decorator.go
@@ -0,0 +1,190 @@
+package templateagent
+
+import (
+	"fmt"
+	"strings"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/addonfactory"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+)
+
+type deploymentDecorator interface {
+	// decorate modifies the deployment in place
+	decorate(deployment *appsv1.Deployment) error
+}
+
+type environmentDecorator struct {
+	orderedValues orderedValues
+}
+
+func newEnvironmentDecorator(orderedValues orderedValues) deploymentDecorator {
+	return &environmentDecorator{
+		orderedValues: orderedValues,
+	}
+}
+
+func (d *environmentDecorator) decorate(deployment *appsv1.Deployment) error {
+	envVars := make([]corev1.EnvVar, len(d.orderedValues))
+	for index, value := range d.orderedValues {
+		envVars[index] = corev1.EnvVar{
+			Name:  value.name,
+			Value: value.value,
+		}
+	}
+
+	for j := range deployment.Spec.Template.Spec.Containers {
+		deployment.Spec.Template.Spec.Containers[j].Env = append(
+			deployment.Spec.Template.Spec.Containers[j].Env,
+			envVars...)
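+		// note: orderedValues is a slice rather than a map, so the env vars are
+		// appended in a stable order; Go randomizes map iteration, and a map here
+		// could reorder the env list and churn the Deployment on every reconcile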
+ } + + return nil +} + +type volumeDecorator struct { + template *addonapiv1alpha1.AddOnTemplate + addonName string +} + +func newVolumeDecorator(addonName string, template *addonapiv1alpha1.AddOnTemplate) deploymentDecorator { + return &volumeDecorator{ + addonName: addonName, + template: template, + } +} + +func (d *volumeDecorator) decorate(deployment *appsv1.Deployment) error { + + volumeMounts := []corev1.VolumeMount{} + volumes := []corev1.Volume{} + + for _, registration := range d.template.Spec.Registration { + if registration.Type == addonapiv1alpha1.RegistrationTypeKubeClient { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "hub-kubeconfig", + MountPath: hubKubeconfigSecretMountPath(), + }) + volumes = append(volumes, corev1.Volume{ + Name: "hub-kubeconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: hubKubeconfigSecretName(d.addonName), + }, + }, + }) + } + + if registration.Type == addonapiv1alpha1.RegistrationTypeCustomSigner { + if registration.CustomSigner == nil { + return fmt.Errorf("custom signer is nil") + } + name := fmt.Sprintf("cert-%s", strings.ReplaceAll( + strings.ReplaceAll(registration.CustomSigner.SignerName, "/", "-"), + ".", "-")) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: name, + MountPath: customSignedSecretMountPath(registration.CustomSigner.SignerName), + }) + volumes = append(volumes, corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: getCustomSignedSecretName(d.addonName, registration.CustomSigner.SignerName), + }, + }, + }) + } + } + + if len(volumeMounts) == 0 || len(volumes) == 0 { + return nil + } + + for j := range deployment.Spec.Template.Spec.Containers { + deployment.Spec.Template.Spec.Containers[j].VolumeMounts = append( + deployment.Spec.Template.Spec.Containers[j].VolumeMounts, volumeMounts...) + } + + deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volumes...) 
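+	// the secret names referenced above come from hubKubeconfigSecretName and
+	// getCustomSignedSecretName at the bottom of this file; the registration flow
+	// is expected to create secrets with these names on the managed cluster side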
+
+	return nil
+}
+
+type nodePlacementDecorator struct {
+	privateValues addonfactory.Values
+}
+
+func newNodePlacementDecorator(privateValues addonfactory.Values) deploymentDecorator {
+	return &nodePlacementDecorator{
+		privateValues: privateValues,
+	}
+}
+
+func (d *nodePlacementDecorator) decorate(deployment *appsv1.Deployment) error {
+	nodePlacement, ok := d.privateValues[NodePlacementPrivateValueKey]
+	if !ok {
+		return nil
+	}
+
+	np, ok := nodePlacement.(*addonapiv1alpha1.NodePlacement)
+	if !ok {
+		return fmt.Errorf("node placement value is invalid")
+	}
+
+	if np.NodeSelector != nil {
+		deployment.Spec.Template.Spec.NodeSelector = np.NodeSelector
+	}
+
+	if np.Tolerations != nil {
+		deployment.Spec.Template.Spec.Tolerations = np.Tolerations
+	}
+
+	return nil
+}
+
+type imageDecorator struct {
+	privateValues addonfactory.Values
+}
+
+func newImageDecorator(privateValues addonfactory.Values) deploymentDecorator {
+	return &imageDecorator{
+		privateValues: privateValues,
+	}
+}
+
+func (d *imageDecorator) decorate(deployment *appsv1.Deployment) error {
+	registries, ok := d.privateValues[RegistriesPrivateValueKey]
+	if !ok {
+		return nil
+	}
+
+	ims, ok := registries.([]addonapiv1alpha1.ImageMirror)
+	if !ok {
+		return fmt.Errorf("registries value is invalid")
+	}
+
+	for i := range deployment.Spec.Template.Spec.Containers {
+		deployment.Spec.Template.Spec.Containers[i].Image = addonfactory.OverrideImage(
+			ims, deployment.Spec.Template.Spec.Containers[i].Image)
+	}
+
+	return nil
+}
+
+func hubKubeconfigSecretMountPath() string {
+	return "/managed/hub-kubeconfig"
+}
+
+func hubKubeconfigSecretName(addonName string) string {
+	return fmt.Sprintf("%s-hub-kubeconfig", addonName)
+}
+
+func getCustomSignedSecretName(addonName, signerName string) string {
+	return fmt.Sprintf("%s-%s-client-cert", addonName, strings.ReplaceAll(signerName, "/", "-"))
+}
+
+func customSignedSecretMountPath(signerName string) string {
+	return fmt.Sprintf("/managed/%s", strings.ReplaceAll(signerName, "/", "-"))
+}
diff --git a/pkg/addon/templateagent/registration.go b/pkg/addon/templateagent/registration.go
new file mode 100644
index 000000000..e43cc4099
--- /dev/null
+++ b/pkg/addon/templateagent/registration.go
@@ -0,0 +1,415 @@
+package templateagent
+
+import (
+	"context"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"strings"
+	"time"
+
+	openshiftcrypto "github.com/openshift/library-go/pkg/crypto"
+	"github.com/pkg/errors"
+	certificatesv1 "k8s.io/api/certificates/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
+
+	"open-cluster-management.io/addon-framework/pkg/agent"
+	"open-cluster-management.io/addon-framework/pkg/utils"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+)
+
+const (
+	TLSCACert = "ca.crt"
+	TLSCAKey  = "ca.key"
+	// AddonTemplateLabelKey is the label key to set addon template name.
It is to set the resources on the hub relating + // to an addon template + AddonTemplateLabelKey = "open-cluster-management.io/addon-template-name" +) + +func (a *CRDTemplateAgentAddon) GetDesiredAddOnTemplate(addon *addonapiv1alpha1.ManagedClusterAddOn, + clusterName, addonName string) (*addonapiv1alpha1.AddOnTemplate, error) { + + if addon == nil { + var err error + addon, err = a.addonLister.ManagedClusterAddOns(clusterName).Get(addonName) + if err != nil { + return nil, err + } + } + + return a.GetDesiredAddOnTemplateByAddon(addon) +} + +func (a *CRDTemplateAgentAddon) TemplateCSRConfigurationsFunc() func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig { + + return func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig { + template, err := a.GetDesiredAddOnTemplate(nil, cluster.Name, a.addonName) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get addon %s template: %v", a.addonName, err)) + return nil + } + if template == nil { + return nil + } + + contain := func(rcs []addonapiv1alpha1.RegistrationConfig, signerName string) bool { + for _, rc := range rcs { + if rc.SignerName == signerName { + return true + } + } + return false + } + + registrationConfigs := make([]addonapiv1alpha1.RegistrationConfig, 0) + for _, registration := range template.Spec.Registration { + switch registration.Type { + case addonapiv1alpha1.RegistrationTypeKubeClient: + if !contain(registrationConfigs, certificatesv1.KubeAPIServerClientSignerName) { + configs := agent.KubeClientSignerConfigurations(a.addonName, a.agentName)(cluster) + registrationConfigs = append(registrationConfigs, configs...) + } + + case addonapiv1alpha1.RegistrationTypeCustomSigner: + if registration.CustomSigner == nil { + continue + } + if !contain(registrationConfigs, registration.CustomSigner.SignerName) { + configs := CustomSignerConfigurations( + a.addonName, a.agentName, registration.CustomSigner)(cluster) + registrationConfigs = append(registrationConfigs, configs...) 
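+				// note: the contain() guard above dedupes by signer name, so a template
+				// that repeats a registration entry still yields at most one
+				// RegistrationConfig per signer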
+			}
+
+		default:
+			utilruntime.HandleError(fmt.Errorf("unsupported registration type %s", registration.Type))
+		}
+
+		}
+
+		return registrationConfigs
+	}
+}
+
+// CustomSignerConfigurations returns a func that generates the RegistrationConfig
+// for a CustomSigner type registration addon
+func CustomSignerConfigurations(addonName, agentName string,
+	customSignerConfig *addonapiv1alpha1.CustomSignerRegistrationConfig,
+) func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig {
+	return func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig {
+		if customSignerConfig == nil {
+			utilruntime.HandleError(fmt.Errorf("custom signer is nil"))
+			return nil
+		}
+		config := addonapiv1alpha1.RegistrationConfig{
+			SignerName: customSignerConfig.SignerName,
+			// TODO: confirm the subject
+			Subject: addonapiv1alpha1.Subject{
+				User:   agent.DefaultUser(cluster.Name, addonName, agentName),
+				Groups: agent.DefaultGroups(cluster.Name, addonName),
+			},
+		}
+		if customSignerConfig.Subject != nil {
+			config.Subject = *customSignerConfig.Subject
+		}
+
+		return []addonapiv1alpha1.RegistrationConfig{config}
+	}
+}
+
+func (a *CRDTemplateAgentAddon) TemplateCSRApproveCheckFunc() agent.CSRApproveFunc {
+
+	return func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn,
+		csr *certificatesv1.CertificateSigningRequest) bool {
+
+		template, err := a.GetDesiredAddOnTemplate(addon, cluster.Name, a.addonName)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("failed to get addon %s template: %v", a.addonName, err))
+			return false
+		}
+		if template == nil {
+			return false
+		}
+
+		for _, registration := range template.Spec.Registration {
+			switch registration.Type {
+			case addonapiv1alpha1.RegistrationTypeKubeClient:
+
+				if csr.Spec.SignerName == certificatesv1.KubeAPIServerClientSignerName {
+					return KubeClientCSRApprover(a.agentName)(cluster, addon, csr)
+				}
+
+			case addonapiv1alpha1.RegistrationTypeCustomSigner:
+				if registration.CustomSigner == nil {
+					continue
+				}
+				if csr.Spec.SignerName == registration.CustomSigner.SignerName {
+					return CustomerSignerCSRApprover(a.addonName)(cluster, addon, csr)
+				}
+
+			default:
+				utilruntime.HandleError(fmt.Errorf("unsupported registration type %s", registration.Type))
+			}
+
+		}
+
+		return false
+	}
+}
+
+// KubeClientCSRApprover approves the CSR when the addon agent uses the default group,
+// the default user and the "kubernetes.io/kube-apiserver-client" signer to sign the CSR.
+func KubeClientCSRApprover(agentName string) agent.CSRApproveFunc {
+	return func(
+		cluster *clusterv1.ManagedCluster,
+		addon *addonapiv1alpha1.ManagedClusterAddOn,
+		csr *certificatesv1.CertificateSigningRequest) bool {
+		if csr.Spec.SignerName != certificatesv1.KubeAPIServerClientSignerName {
+			return false
+		}
+		return utils.DefaultCSRApprover(agentName)(cluster, addon, csr)
+	}
+}
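+
+// note: unlike the kube-client approver above, the custom-signer approver below
+// approves unconditionally after logging; tightening this would require checking
+// the CSR subject against the template's registration config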
cluster: %s, addon %s, requester: %s", + cluster.Name, addon.Name, csr.Spec.Username) + return true + } +} + +func (a *CRDTemplateAgentAddon) TemplateCSRSignFunc() agent.CSRSignerFunc { + + return func(csr *certificatesv1.CertificateSigningRequest) []byte { + // TODO: consider to change the agent.CSRSignerFun to accept parameter addon + getClusterName := func(userName string) string { + return csr.Labels[clusterv1.ClusterNameLabelKey] + } + + clusterName := getClusterName(csr.Spec.Username) + template, err := a.GetDesiredAddOnTemplate(nil, clusterName, a.addonName) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get template for addon %s in cluster %s: %v", + a.addonName, clusterName, err)) + return nil + } + if template == nil { + return nil + } + + for _, registration := range template.Spec.Registration { + switch registration.Type { + case addonapiv1alpha1.RegistrationTypeKubeClient: + continue + + case addonapiv1alpha1.RegistrationTypeCustomSigner: + if registration.CustomSigner == nil { + continue + } + if csr.Spec.SignerName == registration.CustomSigner.SignerName { + return CustomSignerWithExpiry(a.hubKubeClient, registration.CustomSigner, 24*time.Hour)(csr) + } + + default: + utilruntime.HandleError(fmt.Errorf("unsupported registration type %s", registration.Type)) + } + + } + + return nil + } +} + +func CustomSignerWithExpiry( + kubeclient kubernetes.Interface, + customSignerConfig *addonapiv1alpha1.CustomSignerRegistrationConfig, + duration time.Duration) agent.CSRSignerFunc { + return func(csr *certificatesv1.CertificateSigningRequest) []byte { + if customSignerConfig == nil { + utilruntime.HandleError(fmt.Errorf("custome signer is nil")) + return nil + } + + if csr.Spec.SignerName != customSignerConfig.SignerName { + return nil + } + caSecret, err := kubeclient.CoreV1().Secrets(customSignerConfig.SigningCA.Namespace).Get( + context.TODO(), customSignerConfig.SigningCA.Name, metav1.GetOptions{}) + if err != nil { + utilruntime.HandleError(fmt.Errorf("get custome signer ca %s/%s failed, %v", + customSignerConfig.SigningCA.Namespace, customSignerConfig.SigningCA.Name, err)) + return nil + } + + caData, caKey, err := extractCAdata(caSecret.Data[TLSCACert], caSecret.Data[TLSCAKey]) + if err != nil { + utilruntime.HandleError(fmt.Errorf("get ca %s/%s data failed, %v", + customSignerConfig.SigningCA.Namespace, customSignerConfig.SigningCA.Name, err)) + return nil + } + return utils.DefaultSignerWithExpiry(caKey, caData, duration)(csr) + } +} + +func extractCAdata(caCertData, caKeyData []byte) ([]byte, []byte, error) { + certBlock, _ := pem.Decode(caCertData) + if certBlock == nil { + return nil, nil, errors.New("failed to decode ca cert") + } + caCert, err := x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse ca certificate") + } + keyBlock, _ := pem.Decode(caKeyData) + if keyBlock == nil { + return nil, nil, errors.New("failed to decode ca key") + } + caKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse ca key") + } + + caConfig := &openshiftcrypto.TLSCertificateConfig{ + Certs: []*x509.Certificate{caCert}, + Key: caKey, + } + return caConfig.GetPEMBytes() +} + +// TemplatePermissionConfigFunc returns a func that can grant permission for addon agent +// that is deployed by addon template. 
+
+// TemplatePermissionConfigFunc returns a func that can grant permission for the
+// addon agent that is deployed by the addon template.
+// The returned func creates a rolebinding to bind the clusterRole/role specified
+// by the user, so the user is responsible for making sure the clusterRole/role exists.
+func (a *CRDTemplateAgentAddon) TemplatePermissionConfigFunc() agent.PermissionConfigFunc {
+
+	return func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+		template, err := a.GetDesiredAddOnTemplate(addon, cluster.Name, a.addonName)
+		if err != nil {
+			return err
+		}
+		if template == nil {
+			return nil
+		}
+
+		for _, registration := range template.Spec.Registration {
+			switch registration.Type {
+			case addonapiv1alpha1.RegistrationTypeKubeClient:
+				kcrc := registration.KubeClient
+				if kcrc == nil {
+					continue
+				}
+
+				err := a.createKubeClientPermissions(template.Name, kcrc, cluster, addon)
+				if err != nil {
+					return err
+				}
+
+			case addonapiv1alpha1.RegistrationTypeCustomSigner:
+				continue
+
+			default:
+				utilruntime.HandleError(fmt.Errorf("unsupported registration type %s", registration.Type))
+			}
+
+		}
+
+		return nil
+	}
+}
+
+func (a *CRDTemplateAgentAddon) createKubeClientPermissions(
+	templateName string,
+	kcrc *addonapiv1alpha1.KubeClientRegistrationConfig,
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn,
+) error {
+
+	for _, pc := range kcrc.HubPermissions {
+		switch pc.Type {
+		case addonapiv1alpha1.HubPermissionsBindingCurrentCluster:
+			klog.V(5).Infof("Set hub permission for addon %s/%s, UID: %s, APIVersion: %s, Kind: %s",
+				addon.Namespace, addon.Name, addon.UID, addon.APIVersion, addon.Kind)
+
+			owner := metav1.OwnerReference{
+				// TODO: use the apiVersion and kind from the addon object; they can
+				// currently be empty for some unknown reason
+				APIVersion: "addon.open-cluster-management.io/v1alpha1",
+				Kind:       "ManagedClusterAddOn",
+				Name:       addon.Name,
+				UID:        addon.UID,
+			}
+			err := a.createPermissionBinding(templateName,
+				cluster.Name, addon.Name, cluster.Name, pc.RoleRef, &owner)
+			if err != nil {
+				return err
+			}
+		case addonapiv1alpha1.HubPermissionsBindingSingleNamespace:
+			if pc.SingleNamespace == nil {
+				return fmt.Errorf("single namespace is nil")
+			}
+			// set the owner reference to nil since the rolebinding is in a different
+			// namespace than the ManagedClusterAddOn
+			// TODO: clean up the rolebinding when the addon is deleted
+			err := a.createPermissionBinding(templateName,
+				cluster.Name, addon.Name, pc.SingleNamespace.Namespace, pc.RoleRef, nil)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (a *CRDTemplateAgentAddon) createPermissionBinding(templateName, clusterName, addonName, namespace string,
+	roleRef rbacv1.RoleRef, owner *metav1.OwnerReference) error {
+	// TODO: confirm the group
+	groups := agent.DefaultGroups(clusterName, addonName)
+	subject := []rbacv1.Subject{}
+	for _, group := range groups {
+		subject = append(subject, rbacv1.Subject{
+			Kind: "Group", APIGroup: "rbac.authorization.k8s.io", Name: group,
+		})
+	}
+	binding := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: fmt.Sprintf("open-cluster-management:%s:%s:agent",
+				addonName, strings.ToLower(roleRef.Kind)),
+			Namespace: namespace,
+			Labels: map[string]string{
+				addonapiv1alpha1.AddonLabelKey: addonName,
+				AddonTemplateLabelKey:          "",
+			},
+		},
+		RoleRef:  roleRef,
+		Subjects: subject,
+	}
+	if owner != nil {
+		binding.OwnerReferences = []metav1.OwnerReference{*owner}
+	}
+	_, err := a.rolebindingLister.RoleBindings(namespace).Get(binding.Name)
+	switch {
+	case err == nil:
+		// TODO: update the rolebinding if it is not the
same + klog.Infof("rolebinding %s already exists", binding.Name) + return nil + case apierrors.IsNotFound(err): + _, createErr := a.hubKubeClient.RbacV1().RoleBindings(namespace).Create( + context.TODO(), binding, metav1.CreateOptions{}) + if createErr != nil && !apierrors.IsAlreadyExists(createErr) { + return createErr + } + case err != nil: + return err + } + + return nil +} diff --git a/pkg/addon/templateagent/registration_test.go b/pkg/addon/templateagent/registration_test.go new file mode 100644 index 000000000..0df3f77d1 --- /dev/null +++ b/pkg/addon/templateagent/registration_test.go @@ -0,0 +1,611 @@ +package templateagent + +import ( + "bytes" + "context" + "fmt" + "strings" + "testing" + "time" + + certificatesv1 "k8s.io/api/certificates/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + fakekube "k8s.io/client-go/kubernetes/fake" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func TestTemplateCSRConfigurationsFunc(t *testing.T) { + cases := []struct { + name string + agentName string + cluster *clusterv1.ManagedCluster + addon *addonapiv1alpha1.ManagedClusterAddOn + template *addonapiv1alpha1.AddOnTemplate + expectedConfigs []addonapiv1alpha1.RegistrationConfig + }{ + { + name: "empty", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "", ""), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{}), + expectedConfigs: []addonapiv1alpha1.RegistrationConfig{}, + }, + { + name: "kubeclient", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeKubeClient, + KubeClient: &addonapiv1alpha1.KubeClientRegistrationConfig{ + HubPermissions: []addonapiv1alpha1.HubPermissionConfig{ + { + Type: addonapiv1alpha1.HubPermissionsBindingSingleNamespace, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "test", + }, + SingleNamespace: &addonapiv1alpha1.SingleNamespaceBindingConfig{ + Namespace: "test", + }, + }, + }, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + expectedConfigs: []addonapiv1alpha1.RegistrationConfig{ + { + SignerName: "kubernetes.io/kube-apiserver-client", + Subject: addonapiv1alpha1.Subject{ + User: "system:open-cluster-management:cluster:cluster1:addon:addon1:agent:agent1", + + Groups: []string{ + "system:open-cluster-management:cluster:cluster1:addon:addon1", + "system:open-cluster-management:addon:addon1", + "system:authenticated", + }, + OrganizationUnits: []string{}, + }, + }, + }, + }, + { + name: "customsigner", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeCustomSigner, + CustomSigner: &addonapiv1alpha1.CustomSignerRegistrationConfig{ + SignerName: "s1", + Subject: &addonapiv1alpha1.Subject{ + User: "u1", + Groups: []string{ + "g1", + "g2", + }, + 
OrganizationUnits: []string{}, + }, + SigningCA: addonapiv1alpha1.SigningCARef{ + Namespace: "ns1", + Name: "name1"}, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + expectedConfigs: []addonapiv1alpha1.RegistrationConfig{ + { + SignerName: "s1", + Subject: addonapiv1alpha1.Subject{ + User: "u1", + Groups: []string{ + "g1", + "g2", + }, + OrganizationUnits: []string{}, + }, + }, + }, + }, + } + for _, c := range cases { + addonClient := fakeaddon.NewSimpleClientset(c.template, c.addon) + addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute) + mcaStore := addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore() + if err := mcaStore.Add(c.addon); err != nil { + t.Fatal(err) + } + atStore := addonInformerFactory.Addon().V1alpha1().AddOnTemplates().Informer().GetStore() + if err := atStore.Add(c.template); err != nil { + t.Fatal(err) + } + + agent := NewCRDTemplateAgentAddon(c.addon.Name, c.agentName, nil, addonClient, addonInformerFactory, nil, nil) + f := agent.TemplateCSRConfigurationsFunc() + registrationConfigs := f(c.cluster) + if !equality.Semantic.DeepEqual(registrationConfigs, c.expectedConfigs) { + t.Errorf("expected registrationConfigs %v, but got %v", c.expectedConfigs, registrationConfigs) + } + } +} + +func TestTemplateCSRApproveCheckFunc(t *testing.T) { + cases := []struct { + name string + agentName string + cluster *clusterv1.ManagedCluster + addon *addonapiv1alpha1.ManagedClusterAddOn + template *addonapiv1alpha1.AddOnTemplate + csr *certificatesv1.CertificateSigningRequest + expectedApprove bool + }{ + { + name: "empty", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "", ""), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{}), + expectedApprove: false, + }, + { + name: "kubeclient", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeKubeClient, + KubeClient: &addonapiv1alpha1.KubeClientRegistrationConfig{ + HubPermissions: []addonapiv1alpha1.HubPermissionConfig{ + { + Type: addonapiv1alpha1.HubPermissionsBindingSingleNamespace, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "test", + }, + SingleNamespace: &addonapiv1alpha1.SingleNamespaceBindingConfig{ + Namespace: "test", + }, + }, + }, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + csr: &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csr1", + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "kubernetes.io/kube-apiserver-client", + }, + }, + expectedApprove: false, // fake csr data + }, + { + name: "customsigner", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeCustomSigner, + CustomSigner: &addonapiv1alpha1.CustomSignerRegistrationConfig{ + SignerName: "s1", + Subject: &addonapiv1alpha1.Subject{ + User: "u1", + Groups: []string{ + "g1", + "g2", + }, + OrganizationUnits: []string{}, + }, + SigningCA: addonapiv1alpha1.SigningCARef{ + Namespace: "ns1", + Name: "name1"}, + }, + }, + }), + addon: 
NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + csr: &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csr1", + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "s1", + }, + }, + expectedApprove: true, + }, + } + for _, c := range cases { + addonClient := fakeaddon.NewSimpleClientset(c.template, c.addon) + addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute) + mcaStore := addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore() + if err := mcaStore.Add(c.addon); err != nil { + t.Fatal(err) + } + atStore := addonInformerFactory.Addon().V1alpha1().AddOnTemplates().Informer().GetStore() + if err := atStore.Add(c.template); err != nil { + t.Fatal(err) + } + agent := NewCRDTemplateAgentAddon(c.addon.Name, c.agentName, nil, addonClient, addonInformerFactory, nil, nil) + f := agent.TemplateCSRApproveCheckFunc() + approve := f(c.cluster, c.addon, c.csr) + if approve != c.expectedApprove { + t.Errorf("expected approve result %v, but got %v", c.expectedApprove, approve) + } + } +} + +func TestTemplateCSRSignFunc(t *testing.T) { + cases := []struct { + name string + agentName string + cluster *clusterv1.ManagedCluster + addon *addonapiv1alpha1.ManagedClusterAddOn + template *addonapiv1alpha1.AddOnTemplate + csr *certificatesv1.CertificateSigningRequest + expectedCert []byte + }{ + { + name: "kubeclient", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeKubeClient, + KubeClient: &addonapiv1alpha1.KubeClientRegistrationConfig{ + HubPermissions: []addonapiv1alpha1.HubPermissionConfig{ + { + Type: addonapiv1alpha1.HubPermissionsBindingSingleNamespace, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "test", + }, + SingleNamespace: &addonapiv1alpha1.SingleNamespaceBindingConfig{ + Namespace: "test", + }, + }, + }, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + csr: &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csr1", + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "kubernetes.io/kube-apiserver-client", + Username: "system:open-cluster-management:cluster1:adcde", + }, + }, + expectedCert: nil, + }, + { + name: "customsigner no ca secret", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeCustomSigner, + CustomSigner: &addonapiv1alpha1.CustomSignerRegistrationConfig{ + SignerName: "s1", + Subject: &addonapiv1alpha1.Subject{ + User: "u1", + Groups: []string{ + "g1", + "g2", + }, + OrganizationUnits: []string{}, + }, + SigningCA: addonapiv1alpha1.SigningCARef{ + Namespace: "ns1", + Name: "name1"}, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + csr: &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csr1", + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "s1", + Username: "system:open-cluster-management:cluster1:adcde", + }, + }, + expectedCert: nil, + }, + } + for _, c := range cases { + addonClient := fakeaddon.NewSimpleClientset(c.template, c.addon) + hubKubeClient 
:= fakekube.NewSimpleClientset() + addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute) + mcaStore := addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore() + if err := mcaStore.Add(c.addon); err != nil { + t.Fatal(err) + } + atStore := addonInformerFactory.Addon().V1alpha1().AddOnTemplates().Informer().GetStore() + if err := atStore.Add(c.template); err != nil { + t.Fatal(err) + } + + agent := NewCRDTemplateAgentAddon(c.addon.Name, c.agentName, hubKubeClient, addonClient, addonInformerFactory, nil, nil) + f := agent.TemplateCSRSignFunc() + cert := f(c.csr) + if !bytes.Equal(cert, c.expectedCert) { + t.Errorf("expected cert %v, but got %v", c.expectedCert, cert) + } + } +} + +func NewFakeManagedCluster(name string) *clusterv1.ManagedCluster { + return &clusterv1.ManagedCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "ManagedCluster", + APIVersion: clusterv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: clusterv1.ManagedClusterSpec{}, + } +} + +func NewFakeTemplateManagedClusterAddon(name, clusterName, addonTemplateName, addonTemplateSpecHash string) *addonapiv1alpha1.ManagedClusterAddOn { + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: clusterName, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{}, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{}, + } + + if addonTemplateName != "" { + addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addontemplates", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: addonTemplateName, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: addonTemplateName, + }, + SpecHash: addonTemplateSpecHash, + }, + }, + } + } + return addon +} + +func NewFakeAddonTemplate(name string, + registrationSpec []addonapiv1alpha1.RegistrationSpec) *addonapiv1alpha1.AddOnTemplate { + return &addonapiv1alpha1.AddOnTemplate{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: addonapiv1alpha1.AddOnTemplateSpec{ + Registration: registrationSpec, + }, + } +} + +func NewFakeRoleBinding(addonName, namespace string, subject []rbacv1.Subject, roleRef rbacv1.RoleRef, + owner metav1.OwnerReference) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("open-cluster-management:%s:%s:agent", + addonName, strings.ToLower(roleRef.Kind)), + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{owner}, + Labels: map[string]string{ + addonapiv1alpha1.AddonLabelKey: addonName, + }, + }, + RoleRef: roleRef, + Subjects: subject, + } +} + +func TestTemplatePermissionConfigFunc(t *testing.T) { + cases := []struct { + name string + agentName string + cluster *clusterv1.ManagedCluster + addon *addonapiv1alpha1.ManagedClusterAddOn + template *addonapiv1alpha1.AddOnTemplate + rolebinding *rbacv1.RoleBinding + expectedErr error + validatePermissionFunc func(*testing.T, kubernetes.Interface) + }{ + { + name: "kubeclient current cluster binding", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeKubeClient, + KubeClient: 
&addonapiv1alpha1.KubeClientRegistrationConfig{ + HubPermissions: []addonapiv1alpha1.HubPermissionConfig{ + { + Type: addonapiv1alpha1.HubPermissionsBindingCurrentCluster, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: "test", + }, + }, + }, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + rolebinding: NewFakeRoleBinding("addon1", "cluster1", + []rbacv1.Subject{{ + Kind: "Group", + APIGroup: "rbac.authorization.k8s.io", + Name: "system:authenticated"}, + }, rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: "test", + }, + metav1.OwnerReference{ + APIVersion: "addon.open-cluster-management.io/v1alpha1", + Kind: "ManagedClusterAddOn", + Name: "addon1", + UID: "fakeuid", + }), + expectedErr: nil, + validatePermissionFunc: func(t *testing.T, kubeClient kubernetes.Interface) { + rb, err := kubeClient.RbacV1().RoleBindings("cluster1").Get(context.TODO(), + fmt.Sprintf("open-cluster-management:%s:%s:agent", "addon1", strings.ToLower("Role")), + metav1.GetOptions{}, + ) + if err != nil { + t.Errorf("failed to get rolebinding: %v", err) + } + + if rb.RoleRef.Name != "test" { + t.Errorf("expected rolebinding %s, got %s", "test", rb.RoleRef.Name) + } + if len(rb.OwnerReferences) != 1 { + t.Errorf("expected rolebinding to have 1 owner reference, got %d", len(rb.OwnerReferences)) + } + if rb.OwnerReferences[0].Kind != "ManagedClusterAddOn" { + t.Errorf("expected rolebinding owner reference kind to be ManagedClusterAddOn, got %s", + rb.OwnerReferences[0].Kind) + } + if rb.OwnerReferences[0].Name != "addon1" { + t.Errorf("expected rolebinding owner reference name to be addon1, got %s", + rb.OwnerReferences[0].Name) + } + }, + }, + { + name: "kubeclient single namespace binding", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeKubeClient, + KubeClient: &addonapiv1alpha1.KubeClientRegistrationConfig{ + HubPermissions: []addonapiv1alpha1.HubPermissionConfig{ + { + Type: addonapiv1alpha1.HubPermissionsBindingSingleNamespace, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "test", + }, + SingleNamespace: &addonapiv1alpha1.SingleNamespaceBindingConfig{ + Namespace: "test", + }, + }, + }, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + expectedErr: nil, + validatePermissionFunc: func(t *testing.T, kubeClient kubernetes.Interface) { + rb, err := kubeClient.RbacV1().RoleBindings("test").Get(context.TODO(), + fmt.Sprintf("open-cluster-management:%s:%s:agent", "addon1", strings.ToLower("ClusterRole")), + metav1.GetOptions{}, + ) + if err != nil { + t.Errorf("failed to get rolebinding: %v", err) + } + + if rb.RoleRef.Name != "test" { + t.Errorf("expected rolebinding %s, got %s", "test", rb.RoleRef.Name) + } + if len(rb.OwnerReferences) != 0 { + t.Errorf("expected rolebinding to have 0 owner reference, got %d", len(rb.OwnerReferences)) + } + }, + }, + { + name: "customsigner", + agentName: "agent1", + cluster: NewFakeManagedCluster("cluster1"), + template: NewFakeAddonTemplate("template1", []addonapiv1alpha1.RegistrationSpec{ + { + Type: addonapiv1alpha1.RegistrationTypeCustomSigner, + CustomSigner: &addonapiv1alpha1.CustomSignerRegistrationConfig{ + SignerName: "s1", + Subject: &addonapiv1alpha1.Subject{ + User: "u1", + 
Groups: []string{ + "g1", + "g2", + }, + OrganizationUnits: []string{}, + }, + SigningCA: addonapiv1alpha1.SigningCARef{ + Namespace: "ns1", + Name: "name1"}, + }, + }, + }), + addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), + expectedErr: nil, + }, + } + for _, c := range cases { + addonClient := fakeaddon.NewSimpleClientset(c.template, c.addon) + hubKubeClient := fakekube.NewSimpleClientset() + if c.rolebinding != nil { + hubKubeClient = fakekube.NewSimpleClientset(c.rolebinding) + } + addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute) + mcaStore := addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore() + if err := mcaStore.Add(c.addon); err != nil { + t.Fatal(err) + } + atStore := addonInformerFactory.Addon().V1alpha1().AddOnTemplates().Informer().GetStore() + if err := atStore.Add(c.template); err != nil { + t.Fatal(err) + } + kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(hubKubeClient, 10*time.Minute) + if c.rolebinding != nil { + + rbStore := kubeInformers.Rbac().V1().RoleBindings().Informer().GetStore() + if err := rbStore.Add(c.rolebinding); err != nil { + t.Fatal(err) + } + } + + agent := NewCRDTemplateAgentAddon(c.addon.Name, c.agentName, hubKubeClient, addonClient, addonInformerFactory, + kubeInformers.Rbac().V1().RoleBindings().Lister(), nil) + f := agent.TemplatePermissionConfigFunc() + err := f(c.cluster, c.addon) + if err != c.expectedErr { + t.Errorf("expected registrationConfigs %v, but got %v", c.expectedErr, err) + } + if c.validatePermissionFunc != nil { + c.validatePermissionFunc(t, hubKubeClient) + } + } +} diff --git a/pkg/addon/templateagent/template_agent.go b/pkg/addon/templateagent/template_agent.go new file mode 100644 index 000000000..c558e1932 --- /dev/null +++ b/pkg/addon/templateagent/template_agent.go @@ -0,0 +1,220 @@ +package templateagent + +import ( + "fmt" + + "github.com/valyala/fasttemplate" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + rbacv1lister "k8s.io/client-go/listers/rbac/v1" + "k8s.io/klog/v2" + + "open-cluster-management.io/addon-framework/pkg/addonfactory" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/utils" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +const ( + NodePlacementPrivateValueKey = "__NODE_PLACEMENT" + RegistriesPrivateValueKey = "__REGISTRIES" +) + +// templateBuiltinValues includes the built-in values for crd template agentAddon. +// the values for template config should begin with an uppercase letter, so we need +// to convert it to Values by JsonStructToValues. +// the built-in values can not be overridden by getValuesFuncs +type templateCRDBuiltinValues struct { + ClusterName string `json:"CLUSTER_NAME,omitempty"` + AddonInstallNamespace string `json:"INSTALL_NAMESPACE,omitempty"` +} + +// templateDefaultValues includes the default values for crd template agentAddon. 
+// the values for template config should begin with an uppercase letter, so we need +// to convert it to Values by JsonStructToValues. +// the default values can be overridden by getValuesFuncs +type templateCRDDefaultValues struct { + HubKubeConfigPath string `json:"HUB_KUBECONFIG,omitempty"` + ManagedKubeConfigPath string `json:"MANAGED_KUBECONFIG,omitempty"` +} + +type CRDTemplateAgentAddon struct { + getValuesFuncs []addonfactory.GetValuesFunc + trimCRDDescription bool + + hubKubeClient kubernetes.Interface + addonClient addonv1alpha1client.Interface + addonLister addonlisterv1alpha1.ManagedClusterAddOnLister + addonTemplateLister addonlisterv1alpha1.AddOnTemplateLister + rolebindingLister rbacv1lister.RoleBindingLister + addonName string + agentName string +} + +// NewCRDTemplateAgentAddon creates a CRDTemplateAgentAddon instance +func NewCRDTemplateAgentAddon( + addonName, agentName string, + hubKubeClient kubernetes.Interface, + addonClient addonv1alpha1client.Interface, + addonInformers addoninformers.SharedInformerFactory, + rolebindingLister rbacv1lister.RoleBindingLister, + getValuesFuncs ...addonfactory.GetValuesFunc, +) *CRDTemplateAgentAddon { + + a := &CRDTemplateAgentAddon{ + getValuesFuncs: getValuesFuncs, + trimCRDDescription: true, + + hubKubeClient: hubKubeClient, + addonClient: addonClient, + addonLister: addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Lister(), + addonTemplateLister: addonInformers.Addon().V1alpha1().AddOnTemplates().Lister(), + rolebindingLister: rolebindingLister, + addonName: addonName, + agentName: agentName, + } + + return a +} + +func (a *CRDTemplateAgentAddon) Manifests( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + + template, err := a.GetDesiredAddOnTemplateByAddon(addon) + if err != nil { + return nil, err + } + if template == nil { + return nil, fmt.Errorf("addon %s/%s template not found in status", addon.Namespace, addon.Name) + } + return a.renderObjects(cluster, addon, template) +} + +func (a *CRDTemplateAgentAddon) GetAgentAddonOptions() agent.AgentAddonOptions { + // TODO: consider a new way for developers to define their supported config GVRs + supportedConfigGVRs := []schema.GroupVersionResource{} + for gvr := range utils.BuiltInAddOnConfigGVRs { + supportedConfigGVRs = append(supportedConfigGVRs, gvr) + } + return agent.AgentAddonOptions{ + AddonName: a.addonName, + InstallStrategy: nil, + HealthProber: nil, + SupportedConfigGVRs: supportedConfigGVRs, + Registration: &agent.RegistrationOption{ + CSRConfigurations: a.TemplateCSRConfigurationsFunc(), + PermissionConfig: a.TemplatePermissionConfigFunc(), + CSRApproveCheck: a.TemplateCSRApproveCheckFunc(), + CSRSign: a.TemplateCSRSignFunc(), + }, + } +} + +func (a *CRDTemplateAgentAddon) renderObjects( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn, + template *addonapiv1alpha1.AddOnTemplate) ([]runtime.Object, error) { + var objects []runtime.Object + presetValues, configValues, privateValues, err := a.getValues(cluster, addon, template) + if err != nil { + return objects, err + } + klog.V(4).Infof("presetValues %v\t configValues: %v\t privateValues: %v", presetValues, configValues, privateValues) + + for _, manifest := range template.Spec.AgentSpec.Workload.Manifests { + t := fasttemplate.New(string(manifest.Raw), "{{", "}}") + manifestStr := t.ExecuteString(configValues) + klog.V(4).Infof("addon %s/%s render result: %v", addon.Namespace, addon.Name, manifestStr) + object 
:= &unstructured.Unstructured{} + if err := object.UnmarshalJSON([]byte(manifestStr)); err != nil { + return objects, err + } + objects = append(objects, object) + } + + objects, err = a.decorateObjects(template, objects, presetValues, configValues, privateValues) + if err != nil { + return objects, err + } + return objects, nil +} + +func (a *CRDTemplateAgentAddon) decorateObjects( + template *addonapiv1alpha1.AddOnTemplate, + objects []runtime.Object, + orderedValues orderedValues, + configValues, privateValues addonfactory.Values) ([]runtime.Object, error) { + decorators := []deploymentDecorator{ + newEnvironmentDecorator(orderedValues), + newVolumeDecorator(a.addonName, template), + newNodePlacementDecorator(privateValues), + newImageDecorator(privateValues), + } + for index, obj := range objects { + deployment, err := a.convertToDeployment(obj) + if err != nil { + continue + } + + for _, decorator := range decorators { + err = decorator.decorate(deployment) + if err != nil { + return objects, err + } + } + objects[index] = deployment + } + + return objects, nil +} + +func (a *CRDTemplateAgentAddon) convertToDeployment(obj runtime.Object) (*appsv1.Deployment, error) { + if obj.GetObjectKind().GroupVersionKind().Group != "apps" || + obj.GetObjectKind().GroupVersionKind().Kind != "Deployment" { + return nil, fmt.Errorf("not deployment object, %v", obj.GetObjectKind()) + } + + deployment := &appsv1.Deployment{} + uobj, ok := obj.(*unstructured.Unstructured) + if !ok { + return deployment, fmt.Errorf("not unstructured object, %v", obj.GetObjectKind()) + } + + err := runtime.DefaultUnstructuredConverter. + FromUnstructured(uobj.Object, deployment) + if err != nil { + return nil, err + } + return deployment, nil +} + +// GetDesiredAddOnTemplateByAddon returns the desired template of the addon +func (a *CRDTemplateAgentAddon) GetDesiredAddOnTemplateByAddon( + addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.AddOnTemplate, error) { + ok, templateRef := AddonTemplateConfigRef(addon.Status.ConfigReferences) + if !ok { + klog.V(4).Infof("Addon %s template config in status is empty", addon.Name) + return nil, nil + } + + desiredTemplate := templateRef.DesiredConfig + if desiredTemplate == nil || desiredTemplate.SpecHash == "" { + klog.Infof("Addon %s template spec hash is empty", addon.Name) + return nil, fmt.Errorf("addon %s template desired spec hash is empty", addon.Name) + } + + template, err := a.addonTemplateLister.Get(desiredTemplate.Name) + if err != nil { + return nil, err + } + + return template.DeepCopy(), nil +} diff --git a/pkg/addon/templateagent/template_agent_test.go b/pkg/addon/templateagent/template_agent_test.go new file mode 100644 index 000000000..60d599985 --- /dev/null +++ b/pkg/addon/templateagent/template_agent_test.go @@ -0,0 +1,232 @@ +package templateagent + +import ( + "os" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + kubeinformers "k8s.io/client-go/informers" + fakekube "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" + + "open-cluster-management.io/addon-framework/pkg/addonfactory" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + 
clusterv1apha1 "open-cluster-management.io/api/cluster/v1alpha1" +) + +func TestAddonTemplateAgent_Manifests(t *testing.T) { + addonName := "hello" + clusterName := "cluster1" + data, err := os.ReadFile("./testmanifests/addontemplate.yaml") + if err != nil { + t.Errorf("error reading file: %v", err) + } + + s := runtime.NewScheme() + _ = scheme.AddToScheme(s) + _ = clusterv1apha1.Install(s) + _ = addonapiv1alpha1.Install(s) + + addonTemplate := &addonapiv1alpha1.AddOnTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello-template", + }, + } + + addonTemplateSpecHash, err := GetTemplateSpecHash(addonTemplate) + if err != nil { + t.Errorf("error getting template spec hash: %v", err) + } + addonDeploymentConfig := &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello-config", + Namespace: "default", + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{ + { + Name: "LOG_LEVEL", + Value: "4", + }, + }, + NodePlacement: &addonapiv1alpha1.NodePlacement{ + NodeSelector: map[string]string{ + "host": "ssd", + }, + Tolerations: []corev1.Toleration{ + { + Key: "foo", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + Registries: []addonapiv1alpha1.ImageMirror{ + { + Source: "quay.io/open-cluster-management", + Mirror: "quay.io/ocm", + }, + }, + }, + } + + managedClusterAddon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: addonName, + Namespace: clusterName, + }, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + ConfigReferences: []addonapiv1alpha1.ConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addontemplates", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: "hello-template", + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: "hello-template", + }, + SpecHash: addonTemplateSpecHash, + }, + }, + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addondeploymentconfigs", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: "hello-config", + Namespace: "default", + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: "hello-config", + Namespace: "default", + }, + }, + }, + }, + }, + } + decoder := serializer.NewCodecFactory(s).UniversalDeserializer() + _, _, err = decoder.Decode(data, nil, addonTemplate) + if err != nil { + t.Errorf("error decoding file: %v", err) + } + + hubKubeClient := fakekube.NewSimpleClientset() + addonClient := fakeaddon.NewSimpleClientset(addonTemplate, managedClusterAddon, addonDeploymentConfig) + addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute) + mcaStore := addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore() + if err := mcaStore.Add(managedClusterAddon); err != nil { + t.Fatal(err) + } + atStore := addonInformerFactory.Addon().V1alpha1().AddOnTemplates().Informer().GetStore() + if err := atStore.Add(addonTemplate); err != nil { + t.Fatal(err) + } + kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(hubKubeClient, 10*time.Minute) + + agentAddon := NewCRDTemplateAgentAddon( + addonName, + "test-agent", + hubKubeClient, + addonClient, + addonInformerFactory, + 
kubeInformers.Rbac().V1().RoleBindings().Lister(), + addonfactory.GetAddOnDeploymentConfigValues( + addonfactory.NewAddOnDeploymentConfigGetter(addonClient), + addonfactory.ToAddOnCustomizedVariableValues, + ToAddOnNodePlacementPrivateValues, + ToAddOnRegistriesPrivateValues, + ), + ) + + cluster := addonfactory.NewFakeManagedCluster("cluster1", "1.10.1") + + objects, err := agentAddon.Manifests(cluster, managedClusterAddon) + if err != nil { + t.Errorf("expected no error, got err %v", err) + } + if len(objects) != 4 { + t.Errorf("expected 4 objects, but got %v", len(objects)) + } + + object, ok := objects[0].(*appsv1.Deployment) + if !ok { + t.Errorf("expected object to be *appsv1.Deployment, but got %T", objects[0]) + } + + nodeSelector := object.Spec.Template.Spec.NodeSelector + expectedNodeSelector := map[string]string{"host": "ssd"} + if !equality.Semantic.DeepEqual(nodeSelector, expectedNodeSelector) { + t.Errorf("unexpected nodeSelector %v", nodeSelector) + } + + tolerations := object.Spec.Template.Spec.Tolerations + expectedTolerations := []corev1.Toleration{{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute}} + if !equality.Semantic.DeepEqual(tolerations, expectedTolerations) { + t.Errorf("unexpected tolerations %v", tolerations) + } + + envs := object.Spec.Template.Spec.Containers[0].Env + expectedEnvs := []corev1.EnvVar{ + {Name: "LOG_LEVEL", Value: "4"}, + {Name: "HUB_KUBECONFIG", Value: "/managed/hub-kubeconfig/kubeconfig"}, + {Name: "CLUSTER_NAME", Value: clusterName}, + {Name: "INSTALL_NAMESPACE", Value: "open-cluster-management-agent-addon"}, + } + if !equality.Semantic.DeepEqual(envs, expectedEnvs) { + t.Errorf("unexpected envs %v", envs) + } + + volumes := object.Spec.Template.Spec.Volumes + expectedVolumes := []corev1.Volume{ + { + Name: "hub-kubeconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hello-hub-kubeconfig", + }, + }, + }, + { + Name: "cert-example-com-signer-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "hello-example.com-signer-name-client-cert", + }, + }, + }, + } + + if !equality.Semantic.DeepEqual(volumes, expectedVolumes) { + t.Errorf("expected volumes %v, but got: %v", expectedVolumes, volumes) + } + + volumeMounts := object.Spec.Template.Spec.Containers[0].VolumeMounts + expectedVolumeMounts := []corev1.VolumeMount{ + { + Name: "hub-kubeconfig", + MountPath: "/managed/hub-kubeconfig", + }, + { + Name: "cert-example-com-signer-name", + MountPath: "/managed/example.com-signer-name", + }, + } + if !equality.Semantic.DeepEqual(volumeMounts, expectedVolumeMounts) { + t.Errorf("expected volumeMounts %v, but got: %v", expectedVolumeMounts, volumeMounts) + } +} diff --git a/pkg/addon/templateagent/testmanifests/addontemplate.yaml b/pkg/addon/templateagent/testmanifests/addontemplate.yaml new file mode 100644 index 000000000..ad456075c --- /dev/null +++ b/pkg/addon/templateagent/testmanifests/addontemplate.yaml @@ -0,0 +1,116 @@ +apiVersion: addon.open-cluster-management.io/v1alpha1 +kind: AddOnTemplate +metadata: + name: hello-template +spec: + addonName: hello-template + agentSpec: + workload: + manifests: + - kind: Deployment + apiVersion: apps/v1 + metadata: + name: hello-template-agent + namespace: open-cluster-management-agent-addon + annotations: + "addon.open-cluster-management.io/deletion-orphan": "" + labels: + app: hello-template-agent + spec: + replicas: 1 + selector: + matchLabels: + app: hello-template-agent + 
template:
+              metadata:
+                labels:
+                  app: hello-template-agent
+              spec:
+                serviceAccountName: hello-template-agent-sa
+                containers:
+                  - name: helloworld-agent
+                    image: quay.io/open-cluster-management/addon-examples:v1
+                    imagePullPolicy: IfNotPresent
+                    args:
+                      - "/helloworld_helm"
+                      - "agent"
+                      - "--cluster-name={{CLUSTER_NAME}}"
+                      - "--addon-namespace=open-cluster-management-agent-addon"
+                      - "--addon-name=hello-template"
+                      - "--hub-kubeconfig={{HUB_KUBECONFIG}}"
+                    env:
+                      - name: LOG_LEVEL
+                        value: "{{LOG_LEVEL}}" # addonDeploymentConfig variables
+        - kind: ServiceAccount
+          apiVersion: v1
+          metadata:
+            name: hello-template-agent-sa
+            namespace: open-cluster-management-agent-addon
+            annotations:
+              "addon.open-cluster-management.io/deletion-orphan": ""
+        - kind: ClusterRoleBinding
+          apiVersion: rbac.authorization.k8s.io/v1
+          metadata:
+            name: hello-template-agent
+          roleRef:
+            apiGroup: rbac.authorization.k8s.io
+            kind: ClusterRole
+            name: cluster-admin
+          subjects:
+            - kind: ServiceAccount
+              name: hello-template-agent-sa
+              namespace: open-cluster-management-agent-addon
+        - kind: Job
+          apiVersion: batch/v1
+          metadata:
+            name: hello-template-cleanup-configmap
+            namespace: open-cluster-management-agent-addon
+            annotations:
+              "addon.open-cluster-management.io/addon-pre-delete": ""
+          spec:
+            manualSelector: true
+            selector:
+              matchLabels:
+                job: hello-template-cleanup-configmap
+            template:
+              metadata:
+                labels:
+                  job: hello-template-cleanup-configmap
+              spec:
+                serviceAccountName: hello-template-agent-sa
+                restartPolicy: Never
+                containers:
+                  - name: hello-template-agent
+                    image: quay.io/open-cluster-management/addon-examples
+                    imagePullPolicy: IfNotPresent
+                    args:
+                      - "/helloworld_helm"
+                      - "cleanup"
+                      - "--addon-namespace=open-cluster-management-agent-addon"
+  registration:
+    # kubeClient or custom signer; if kubeClient, the user and groups follow a fixed format:
+ # user is "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}" + # group is ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}", + # "system:open-cluster-management:addon:{addonName}", "system:authenticated"] + - type: KubeClient + kubeClient: + hubPermissions: + - type: CurrentCluster + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cm-admin + - customSigner: + signerName: example.com/signer-name + signingCA: + name: ca-secret + namespace: default + subject: + groups: + - g1 + - g2 + organizationUnit: + - o1 + - o2 + user: user1 + type: CustomSigner \ No newline at end of file diff --git a/pkg/addon/templateagent/values.go b/pkg/addon/templateagent/values.go new file mode 100644 index 000000000..650bae12d --- /dev/null +++ b/pkg/addon/templateagent/values.go @@ -0,0 +1,156 @@ +package templateagent + +import ( + "fmt" + "sort" + + "open-cluster-management.io/addon-framework/pkg/addonfactory" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +// ToAddOnNodePlacementPrivateValues only transform the AddOnDeploymentConfig NodePlacement part into Values object +// with a specific key, this value would be used by the addon template controller +func ToAddOnNodePlacementPrivateValues(config addonapiv1alpha1.AddOnDeploymentConfig) (addonfactory.Values, error) { + if config.Spec.NodePlacement == nil { + return nil, nil + } + + return addonfactory.Values{ + NodePlacementPrivateValueKey: config.Spec.NodePlacement, + }, nil +} + +// ToAddOnRegistriesPrivateValues only transform the AddOnDeploymentConfig Registries part into Values object +// with a specific key, this value would be used by the addon template controller +func ToAddOnRegistriesPrivateValues(config addonapiv1alpha1.AddOnDeploymentConfig) (addonfactory.Values, error) { + if config.Spec.Registries == nil { + return nil, nil + } + + return addonfactory.Values{ + RegistriesPrivateValueKey: config.Spec.Registries, + }, nil +} + +type keyValuePair struct { + name string + value string +} + +type orderedValues []keyValuePair + +func (a *CRDTemplateAgentAddon) getValues( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn, + template *addonapiv1alpha1.AddOnTemplate, +) (orderedValues, map[string]interface{}, map[string]interface{}, error) { + + presetValues := make([]keyValuePair, 0) + overrideValues := map[string]interface{}{} + privateValues := map[string]interface{}{} + + defaultSortedKeys, defaultValues, err := a.getDefaultValues(cluster, addon, template) + if err != nil { + return presetValues, overrideValues, privateValues, nil + } + overrideValues = addonfactory.MergeValues(overrideValues, defaultValues) + + privateValuesKeys := map[string]struct{}{ + NodePlacementPrivateValueKey: {}, + RegistriesPrivateValueKey: {}, + } + + for i := 0; i < len(a.getValuesFuncs); i++ { + if a.getValuesFuncs[i] != nil { + userValues, err := a.getValuesFuncs[i](cluster, addon) + if err != nil { + return nil, nil, nil, err + } + + publicValues := map[string]interface{}{} + for k, v := range userValues { + if _, ok := privateValuesKeys[k]; ok { + privateValues[k] = v + continue + } + publicValues[k] = v + } + + overrideValues = addonfactory.MergeValues(overrideValues, publicValues) + } + } + builtinSortedKeys, builtinValues, err := a.getBuiltinValues(cluster, addon) + if err != nil { + return presetValues, overrideValues, privateValues, nil + } + overrideValues = 
addonfactory.MergeValues(overrideValues, builtinValues) + + for k, v := range overrideValues { + _, ok := v.(string) + if !ok { + return nil, nil, nil, fmt.Errorf("only support string type for variables, invalid key %s", k) + } + } + + keys := append(defaultSortedKeys, builtinSortedKeys...) + + for _, key := range keys { + presetValues = append(presetValues, keyValuePair{ + name: key, + value: overrideValues[key].(string), + }) + } + return presetValues, overrideValues, privateValues, nil +} + +func (a *CRDTemplateAgentAddon) getBuiltinValues( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) ([]string, addonfactory.Values, error) { + builtinValues := templateCRDBuiltinValues{} + builtinValues.ClusterName = cluster.GetName() + + installNamespace := addon.Spec.InstallNamespace + if len(installNamespace) == 0 { + installNamespace = addonfactory.AddonDefaultInstallNamespace + } + builtinValues.AddonInstallNamespace = installNamespace + + value, err := addonfactory.JsonStructToValues(builtinValues) + if err != nil { + return nil, nil, err + } + return a.sortValueKeys(value), value, nil +} + +func (a *CRDTemplateAgentAddon) getDefaultValues( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn, + template *addonapiv1alpha1.AddOnTemplate) ([]string, addonfactory.Values, error) { + defaultValues := templateCRDDefaultValues{} + + // TODO: hubKubeConfigSecret depends on the signer configuration in registration, and the registration is an array. + if template.Spec.Registration != nil { + defaultValues.HubKubeConfigPath = hubKubeconfigPath() + } + + value, err := addonfactory.JsonStructToValues(defaultValues) + if err != nil { + return nil, nil, err + } + return a.sortValueKeys(value), value, nil +} + +func (a *CRDTemplateAgentAddon) sortValueKeys(value addonfactory.Values) []string { + keys := make([]string, 0) + for k := range value { + keys = append(keys, k) + } + + sort.Strings(keys) + return keys +} + +func hubKubeconfigPath() string { + return "/managed/hub-kubeconfig/kubeconfig" +} diff --git a/pkg/cmd/hub/addon.go b/pkg/cmd/hub/addon.go new file mode 100644 index 000000000..11f320ac9 --- /dev/null +++ b/pkg/cmd/hub/addon.go @@ -0,0 +1,20 @@ +package hub + +import ( + "github.com/openshift/library-go/pkg/controller/controllercmd" + "github.com/spf13/cobra" + + "open-cluster-management.io/ocm/pkg/addon" + "open-cluster-management.io/ocm/pkg/version" +) + +// NewAddonManager generates a command to start addon manager +func NewAddonManager() *cobra.Command { + cmdConfig := controllercmd. 
+ NewControllerCommandConfig("manager", version.Get(), addon.RunManager) + cmd := cmdConfig.NewCommand() + cmd.Use = "manager" + cmd.Short = "Start the Addon Manager" + + return cmd +} diff --git a/test/integration-test.mk b/test/integration-test.mk index b5be66151..305a480cf 100644 --- a/test/integration-test.mk +++ b/test/integration-test.mk @@ -46,5 +46,10 @@ test-registration-operator-integration: ensure-kubebuilder-tools ./registration-operator-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast .PHONY: test-registration-operator-integration -test-integration: test-registration-operator-integration test-registration-integration test-placement-integration test-work-integration +test-addon-integration: ensure-kubebuilder-tools + go test -c ./test/integration/addon -o ./addon-integration.test + ./addon-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast +.PHONY: test-addon-integration + +test-integration: test-registration-operator-integration test-registration-integration test-placement-integration test-work-integration test-addon-integration .PHONY: test-integration diff --git a/test/integration/addon/addon_configs_test.go b/test/integration/addon/addon_configs_test.go new file mode 100644 index 000000000..452ea44c6 --- /dev/null +++ b/test/integration/addon/addon_configs_test.go @@ -0,0 +1,488 @@ +package integration + +import ( + "context" + "fmt" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/rand" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +var _ = ginkgo.Describe("AddConfigs", func() { + var managedClusterName string + var configDefaultNamespace string + var configDefaultName string + var err error + + ginkgo.BeforeEach(func() { + suffix := rand.String(5) + managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) + configDefaultNamespace = fmt.Sprintf("default-config-%s", suffix) + configDefaultName = fmt.Sprintf("default-config-%s", suffix) + + // prepare cluster + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + clusterNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), clusterNS, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // prepare ClusterManagementAddon + _, err = createClusterManagementAddOn(testAddOnConfigsImpl.name, configDefaultNamespace, configDefaultName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // prepare default config + configDefaultNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: configDefaultNamespace}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), configDefaultNS, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addOnDefaultConfig := &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: configDefaultName, + Namespace: 
configDefaultNamespace, + }, + Spec: addOnDefaultConfigSpec, + } + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(context.Background(), addOnDefaultConfig, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), configDefaultNamespace, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), testAddOnConfigsImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + delete(testAddOnConfigsImpl.registrations, managedClusterName) + }) + + ginkgo.It("Should use default config", func() { + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddOnConfigsImpl.name, + Namespace: managedClusterName, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "test", + }, + } + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.Background(), addon, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // check cma status + assertClusterManagementAddOnDefaultConfigReferences(testAddOnConfigsImpl.name, addonapiv1alpha1.DefaultConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }) + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name) + + // check mca status + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, managedClusterName, addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }) + }) + + ginkgo.It("Should override default config by install strategy", func() { + cma, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), testAddOnConfigsImpl.name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + cma.Annotations = map[string]string{ + addonapiv1alpha1.AddonLifecycleAnnotationKey: addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue, + } + cma.Spec.InstallStrategy = addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyPlacements, + Placements: []addonapiv1alpha1.PlacementStrategy{ + { + PlacementRef: addonapiv1alpha1.PlacementRef{Name: "test-placement", Namespace: 
configDefaultNamespace}, + Configs: []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: "another-config", + }, + }, + }, + RolloutStrategy: addonapiv1alpha1.RolloutStrategy{ + Type: addonapiv1alpha1.AddonRolloutStrategyUpdateAll, + }, + }, + }, + } + updateClusterManagementAddOn(context.Background(), cma) + + placement := &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: configDefaultNamespace}} + _, err = hubClusterClient.ClusterV1beta1().Placements(configDefaultNamespace).Create(context.Background(), placement, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision := &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: configDefaultNamespace, + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + } + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(configDefaultNamespace).Create(context.Background(), decision, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision.Status.Decisions = []clusterv1beta1.ClusterDecision{ + {ClusterName: managedClusterName}, + } + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(configDefaultNamespace).UpdateStatus(context.Background(), decision, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // check cma status + assertClusterManagementAddOnDefaultConfigReferences(testAddOnConfigsImpl.name, addonapiv1alpha1.DefaultConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }) + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{ + PlacementRef: addonapiv1alpha1.PlacementRef{Name: "test-placement", Namespace: configDefaultNamespace}, + ConfigReferences: []addonapiv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: "another-config", + }, + SpecHash: "", + }, + }, + }, + }) + + // check mca status + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, managedClusterName, addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: "another-config", + }, + LastObservedGeneration: 0, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: "another-config", + }, + SpecHash: "", + }, + }) + }) + + ginkgo.It("Should override default config", func() { + addOnConfig := 
&addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-config", + Namespace: managedClusterName, + }, + Spec: addOnTest1ConfigSpec, + } + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create(context.Background(), addOnConfig, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddOnConfigsImpl.name, + Namespace: managedClusterName, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "test", + Configs: []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: addOnConfig.Name, + Namespace: addOnConfig.Namespace, + }, + }, + }, + }, + } + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.Background(), addon, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addon.Status = addonapiv1alpha1.ManagedClusterAddOnStatus{ + SupportedConfigs: []addonapiv1alpha1.ConfigGroupResource{ + { + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + }, + } + updateManagedClusterAddOnStatus(context.Background(), addon) + + // check cma status + assertClusterManagementAddOnDefaultConfigReferences(testAddOnConfigsImpl.name, addonapiv1alpha1.DefaultConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }) + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name) + + // check mca status + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, managedClusterName, addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + }) + }) + + ginkgo.It("Should update config spec successfully", func() { + addOnConfig := &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-config", + Namespace: managedClusterName, + }, + Spec: addOnTest1ConfigSpec, + } + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create(context.Background(), addOnConfig, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddOnConfigsImpl.name, + Namespace: managedClusterName, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "test", + Configs: []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: 
addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: addOnConfig.Name, + Namespace: addOnConfig.Namespace, + }, + }, + }, + }, + } + addon, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.Background(), addon, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addon.Status = addonapiv1alpha1.ManagedClusterAddOnStatus{ + SupportedConfigs: []addonapiv1alpha1.ConfigGroupResource{ + { + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + }, + } + updateManagedClusterAddOnStatus(context.Background(), addon) + + // check mca status + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, managedClusterName, addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + }) + + addOnConfig, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Get(context.Background(), addOnConfig.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addOnConfig.Spec = addOnTest2ConfigSpec + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Update(context.Background(), addOnConfig, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // check mca status + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, managedClusterName, addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + LastObservedGeneration: 2, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + }) + }) + + ginkgo.It("Should not update unsupported config spec hash", func() { + addOnConfig := &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "addon-config", + Namespace: managedClusterName, + }, + Spec: addOnTest1ConfigSpec, + } + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create(context.Background(), addOnConfig, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // empty supported config + supportedConfig := testAddOnConfigsImpl.supportedConfigGVRs + testAddOnConfigsImpl.supportedConfigGVRs = []schema.GroupVersionResource{} + + // do not update mca status.SupportedConfigs + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddOnConfigsImpl.name, + Namespace: managedClusterName, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "test", + Configs: []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: 
addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Name: addOnConfig.Name, + Namespace: addOnConfig.Namespace, + }, + }, + }, + }, + } + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.Background(), addon, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // check cma status + assertClusterManagementAddOnDefaultConfigReferences(testAddOnConfigsImpl.name, addonapiv1alpha1.DefaultConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }) + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name) + + // check mca status + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, managedClusterName, addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: addOnConfig.Namespace, + Name: addOnConfig.Name, + }, + SpecHash: "", + }, + }) + + testAddOnConfigsImpl.supportedConfigGVRs = supportedConfig + }) +}) diff --git a/test/integration/addon/addon_manager_install_test.go b/test/integration/addon/addon_manager_install_test.go new file mode 100644 index 000000000..8ab14f53e --- /dev/null +++ b/test/integration/addon/addon_manager_install_test.go @@ -0,0 +1,174 @@ +package integration + +import ( + "context" + "fmt" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +var _ = ginkgo.Describe("Agent deploy", func() { + suffix := rand.String(5) + var cma *addonapiv1alpha1.ClusterManagementAddOn + var placementNamespace string + var clusterNames []string + + ginkgo.BeforeEach(func() { + // Create clustermanagement addon + cma = &addonapiv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("test-%s", suffix), + Annotations: map[string]string{ + addonapiv1alpha1.AddonLifecycleAnnotationKey: addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue, + }, + }, + Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyManual, + }, + }, + } + _, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + placementNamespace = fmt.Sprintf("ns-%s", suffix) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: placementNamespace}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + for i := 0; i < 4; i++ { + managedClusterName := fmt.Sprintf("managedcluster-%s-%d", suffix, i) + clusterNames = append(clusterNames, managedClusterName) + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + }) + + ginkgo.AfterEach(func() { + err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), cma.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), placementNamespace, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + for _, managedClusterName := range clusterNames { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + }) + + ginkgo.Context("Addon install strategy", func() { + ginkgo.It("Should create/delete mca correctly by placement", func() { + placement := &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: placementNamespace}} + _, err := hubClusterClient.ClusterV1beta1().Placements(placementNamespace).Create(context.Background(), placement, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision := &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: placementNamespace, + Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"}, + }, + } + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).Create(context.Background(), decision, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision.Status.Decisions = []clusterv1beta1.ClusterDecision{ + {ClusterName: clusterNames[0]}, + {ClusterName: clusterNames[1]}, + } + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).UpdateStatus(context.Background(), decision, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + clusterManagementAddon, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), cma.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + clusterManagementAddon.Spec.InstallStrategy = addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyPlacements, + Placements: []addonapiv1alpha1.PlacementStrategy{ + { + PlacementRef: addonapiv1alpha1.PlacementRef{Name: "test-placement", Namespace: placementNamespace}, + RolloutStrategy: addonapiv1alpha1.RolloutStrategy{ + Type: addonapiv1alpha1.AddonRolloutStrategyUpdateAll, + }, + }, + }, + } + + _, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), 
clusterManagementAddon, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + _, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[0]).Get(context.Background(), cma.Name, metav1.GetOptions{}) + if err != nil { + return err + } + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[1]).Get(context.Background(), cma.Name, metav1.GetOptions{}) + if err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update the decision + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).Get(context.Background(), "test-placement", metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision.Status.Decisions = []clusterv1beta1.ClusterDecision{ + {ClusterName: clusterNames[1]}, + {ClusterName: clusterNames[2]}, + } + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).UpdateStatus(context.Background(), decision, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + _, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[1]).Get(context.Background(), cma.Name, metav1.GetOptions{}) + if err != nil { + return err + } + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[2]).Get(context.Background(), cma.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[0]).Get(context.Background(), cma.Name, metav1.GetOptions{}) + if !errors.IsNotFound(err) { + return fmt.Errorf("addon in cluster %s should be removed", clusterNames[0]) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // delete an addon and ensure it is recreated. 
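+			// the addon manager should recreate it, since the placement still selects this cluster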
+ err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[1]).Delete(context.Background(), cma.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + _, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[1]).Get(context.Background(), cma.Name, metav1.GetOptions{}) + if err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + }) +}) diff --git a/test/integration/addon/addon_manager_upgrade_test.go b/test/integration/addon/addon_manager_upgrade_test.go new file mode 100644 index 000000000..ad2463d7b --- /dev/null +++ b/test/integration/addon/addon_manager_upgrade_test.go @@ -0,0 +1,693 @@ +package integration + +import ( + "context" + "fmt" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + workapiv1 "open-cluster-management.io/api/work/v1" +) + +const ( + upgradeDeploymentJson = `{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deployment", + "namespace": "default" + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` +) + +var _ = ginkgo.Describe("Addon upgrade", func() { + var configDefaultNamespace string + var configDefaultName string + var configUpdateName string + var placementName string + var placementNamespace string + var manifestWorkName string + var clusterNames []string + var suffix string + var err error + var cma *addonapiv1alpha1.ClusterManagementAddOn + + ginkgo.BeforeEach(func() { + suffix = rand.String(5) + configDefaultNamespace = fmt.Sprintf("default-config-%s", suffix) + configDefaultName = fmt.Sprintf("default-config-%s", suffix) + configUpdateName = fmt.Sprintf("update-config-%s", suffix) + placementName = fmt.Sprintf("ns-%s", suffix) + placementNamespace = fmt.Sprintf("ns-%s", suffix) + manifestWorkName = fmt.Sprintf("%s-0", constants.DeployWorkNamePrefix(testAddOnConfigsImpl.name)) + + // prepare cma + cma = &addonapiv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddOnConfigsImpl.name, + Annotations: map[string]string{ + addonapiv1alpha1.AddonLifecycleAnnotationKey: addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue, + }, + }, + Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyPlacements, + Placements: []addonapiv1alpha1.PlacementStrategy{ + { + PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace}, + RolloutStrategy: addonapiv1alpha1.RolloutStrategy{ + Type: addonapiv1alpha1.AddonRolloutStrategyUpdateAll, + }, + Configs: 
[]addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + }, + }, + }, + }, + }, + }, + } + _, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // prepare cluster + for i := 0; i < 4; i++ { + managedClusterName := fmt.Sprintf("managedcluster-%s-%d", suffix, i) + clusterNames = append(clusterNames, managedClusterName) + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + // prepare manifestwork obj + for i := 0; i < 4; i++ { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(upgradeDeploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddOnConfigsImpl.manifests[clusterNames[i]] = []runtime.Object{obj} + } + + // prepare placement + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: placementNamespace}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + placement := &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: placementName, Namespace: placementNamespace}} + _, err = hubClusterClient.ClusterV1beta1().Placements(placementNamespace).Create(context.Background(), placement, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision := &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementName, + Namespace: placementNamespace, + Labels: map[string]string{clusterv1beta1.PlacementLabel: placementName}, + }, + } + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).Create(context.Background(), decision, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision.Status.Decisions = []clusterv1beta1.ClusterDecision{ + {ClusterName: clusterNames[0]}, + {ClusterName: clusterNames[1]}, + {ClusterName: clusterNames[2]}, + {ClusterName: clusterNames[3]}, + } + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).UpdateStatus(context.Background(), decision, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // prepare default config + configDefaultNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: configDefaultNamespace}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), configDefaultNS, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addOnDefaultConfig := &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: configDefaultName, + Namespace: configDefaultNamespace, + }, + Spec: addOnDefaultConfigSpec, + } + _, err = 
hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(context.Background(), addOnDefaultConfig, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // prepare update config + addOnUpdateConfig := &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: configUpdateName, + Namespace: configDefaultNamespace, + }, + Spec: addOnTest2ConfigSpec, + } + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(context.Background(), addOnUpdateConfig, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), configDefaultNamespace, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), testAddOnConfigsImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + for _, managedClusterName := range clusterNames { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + delete(testAddOnConfigsImpl.registrations, managedClusterName) + } + }) + + ginkgo.Context("Addon rollout strategy", func() { + ginkgo.It("Should update when config changes", func() { + ginkgo.By("fresh install") + ginkgo.By("check work") + gomega.Eventually(func() error { + for i := 0; i < 4; i++ { + work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Annotations) == 0 { + return fmt.Errorf("Unexpected number of work annotations %v", work.Annotations) + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("update work status to trigger addon status") + for i := 0; i < 4; i++ { + work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + ginkgo.By("check mca status") + for i := 0; i < 4; i++ { + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + 
Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonInstallSucceed, + Message: "install completed with no errors.", + }) + } + + ginkgo.By("check cma status") + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{ + PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace}, + ConfigReferences: []addonapiv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnDefaultConfigSpecHash, + }, + }, + }, + }) + assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonInstallSucceed, + Message: "4/4 install completed with no errors.", + }) + + ginkgo.By("update all") + ginkgo.By("upgrade configs to test1") + addOnConfig, err := hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Get(context.Background(), configDefaultName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + addOnConfig.Spec = addOnTest1ConfigSpec + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Update(context.Background(), addOnConfig, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ginkgo.By("check mca status") + for i := 0; i < 4; i++ { + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + LastObservedGeneration: 2, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + }) 
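+				// all four clusters should now report the test1 spec hash as both desired and last-applied config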
+ assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, + Message: "upgrade completed with no errors.", + }) + } + + ginkgo.By("check cma status") + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{ + PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace}, + ConfigReferences: []addonapiv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + }, + }, + }) + assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, + Message: "4/4 upgrade completed with no errors.", + }) + + ginkgo.By("update work status to avoid addon status update") + gomega.Eventually(func() error { + for i := 0; i < 4; i++ { + work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionFalse, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("rolling upgrade") + ginkgo.By("update cma to rolling update") + cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), cma.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + cma.Spec.InstallStrategy.Placements[0].RolloutStrategy.Type = addonapiv1alpha1.AddonRolloutStrategyRollingUpdate + cma.Spec.InstallStrategy.Placements[0].RolloutStrategy.RollingUpdate = &addonapiv1alpha1.RollingUpdate{MaxConcurrency: intstr.FromString("50%")} + cma.Spec.InstallStrategy.Placements[0].Configs[0].ConfigReferent = addonapiv1alpha1.ConfigReferent{Namespace: configDefaultNamespace, Name: configUpdateName} + _, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), cma, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + 
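+			// with MaxConcurrency 50%, only the first 2 of the 4 clusters should be upgraded in this batch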
ginkgo.By("check mca status") + for i := 0; i < 2; i++ { + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.ProgressingReasonUpgrading, + Message: "upgrading... work is not ready", + }) + } + for i := 2; i < 4; i++ { + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + LastObservedGeneration: 2, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, + Message: "upgrade completed with no errors.", + }) + } + + ginkgo.By("check cma status") + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{ + PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace}, + ConfigReferences: []addonapiv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + 
}, + }, + }) + assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.ProgressingReasonUpgrading, + Message: "2/4 upgrading...", + }) + + ginkgo.By("update 2 work status to trigger addon status") + gomega.Eventually(func() error { + for i := 0; i < 2; i++ { + work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("check mca status") + for i := 0; i < 2; i++ { + assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + LastObservedGeneration: 1, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, + Message: "upgrade completed with no errors.", + }) + } + + ginkgo.By("check cma status") + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{ + PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace}, + ConfigReferences: []addonapiv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: addOnTest1ConfigSpecHash, + }, + LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configDefaultName, + }, + SpecHash: 
addOnTest1ConfigSpecHash, + }, + }, + }, + }) + assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.ProgressingReasonUpgrading, + Message: "4/4 upgrading...", + }) + + ginkgo.By("update another 2 work status to trigger addon status") + gomega.Eventually(func() error { + for i := 2; i < 4; i++ { + work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("check cma status") + assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{ + PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace}, + ConfigReferences: []addonapiv1alpha1.InstallConfigReference{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: addOnDeploymentConfigGVR.Group, + Resource: addOnDeploymentConfigGVR.Resource, + }, + DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{ + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: configDefaultNamespace, + Name: configUpdateName, + }, + SpecHash: addOnTest2ConfigSpecHash, + }, + }, + }, + }) + assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, + Message: "4/4 upgrade completed with no errors.", + }) + }) + }) +}) diff --git a/test/integration/addon/agent_deploy_test.go b/test/integration/addon/agent_deploy_test.go new file mode 100644 index 000000000..7dcc8eac9 --- /dev/null +++ b/test/integration/addon/agent_deploy_test.go @@ -0,0 +1,223 @@ +package integration + +import ( + "context" + "fmt" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + addonapiv1alpha1 
"open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" +) + +const ( + deploymentJson = `{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deployment", + "namespace": "default" + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` + + mchJson = `{ + "apiVersion": "operator.open-cluster-management.io/v1", + "kind": "MultiClusterHub", + "metadata": { + "name": "multiclusterhub", + "namespace": "open-cluster-management" + }, + "spec": { + "separateCertificateManagement": false + } +}` +) + +var _ = ginkgo.Describe("Agent deploy", func() { + var managedClusterName string + var err error + var manifestWorkName string + ginkgo.BeforeEach(func() { + suffix := rand.String(5) + managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) + manifestWorkName = fmt.Sprintf("%s-0", constants.DeployWorkNamePrefix(testAddonImpl.name)) + + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + cma := newClusterManagementAddon(testAddonImpl.name) + _, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), + cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), + testAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.It("Should deploy agent when cma is managed by addon-manager successfully", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj} + testAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeWork, + } + + // Update clustermanagement addon annotattion + cma, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + cma.SetAnnotations(map[string]string{addonapiv1alpha1.AddonLifecycleAnnotationKey: addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue}) + _, err = 
hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), cma, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Create ManagedClusterAddOn + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.Background(), addon, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) { + return fmt.Errorf("Unexpected addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update work to available so addon becomes available + work, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) { + return fmt.Errorf("Unexpected 
addon available condition, %v", addon.Status.Conditions) + } + if !meta.IsStatusConditionFalse(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) { + return fmt.Errorf("Unexpected addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // do nothing if cluster is deleting and addon is not deleted + cluster, err := hubClusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedClusterName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + cluster.SetFinalizers([]string{"cluster.open-cluster-management.io/api-resource-cleanup"}) + _, err = hubClusterClient.ClusterV1().ManagedClusters().Update(context.Background(), cluster, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + time.Sleep(5 * time.Second) // wait 5 seconds to sync + gomega.Eventually(func() error { + _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + +}) diff --git a/test/integration/addon/assertion_test.go b/test/integration/addon/assertion_test.go new file mode 100644 index 000000000..24d37b663 --- /dev/null +++ b/test/integration/addon/assertion_test.go @@ -0,0 +1,242 @@ +package integration + +import ( + "context" + "fmt" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +const addOnDefaultConfigSpecHash = "287d774850847584cc3ebd8b72e2ad3ef8ac6c31803a59324943a7f94054b08a" +const addOnTest1ConfigSpecHash = "d76dad0a6448910652950163cc4324e4616ab5143046555c5ad5b003a622ab8d" +const addOnTest2ConfigSpecHash = "3f815fe02492288fd235ed9bd881987aebb6f15fd2fa2b37c982525c293679bd" + +var addOnDefaultConfigSpec = addonapiv1alpha1.AddOnDeploymentConfigSpec{ + CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{ + { + Name: "test", + Value: "test", + }, + }, +} +var addOnTest1ConfigSpec = addonapiv1alpha1.AddOnDeploymentConfigSpec{ + CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{ + { + Name: "test1", + Value: "test1", + }, + }, +} +var addOnTest2ConfigSpec = addonapiv1alpha1.AddOnDeploymentConfigSpec{ + CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{ + { + Name: "test2", + Value: "test2", + }, + }, +} + +func createClusterManagementAddOn(name, defaultConfigNamespace, defaultConfigName string) (*addonapiv1alpha1.ClusterManagementAddOn, error) { + clusterManagementAddon, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + clusterManagementAddon, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create( + context.Background(), + &addonapiv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + addonapiv1alpha1.AddonLifecycleAnnotationKey: addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue, + }, + }, + Spec: 
addonapiv1alpha1.ClusterManagementAddOnSpec{
+					SupportedConfigs: []addonapiv1alpha1.ConfigMeta{
+						{
+							ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+								Group:    addOnDeploymentConfigGVR.Group,
+								Resource: addOnDeploymentConfigGVR.Resource,
+							},
+							DefaultConfig: &addonapiv1alpha1.ConfigReferent{
+								Name:      defaultConfigName,
+								Namespace: defaultConfigNamespace,
+							},
+						},
+					},
+					InstallStrategy: addonapiv1alpha1.InstallStrategy{
+						Type: addonapiv1alpha1.AddonInstallStrategyManual,
+					},
+				},
+			},
+			metav1.CreateOptions{},
+		)
+		if err != nil {
+			return nil, err
+		}
+		return clusterManagementAddon, nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return clusterManagementAddon, nil
+}
+
+func updateClusterManagementAddOn(ctx context.Context, new *addonapiv1alpha1.ClusterManagementAddOn) {
+	gomega.Eventually(func() bool {
+		old, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(ctx, new.Name, metav1.GetOptions{})
+		if err != nil {
+			return false
+		}
+		old.Spec = new.Spec
+		old.Annotations = new.Annotations
+		_, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(ctx, old, metav1.UpdateOptions{})
+		return err == nil
+	}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+}
+
+func updateManagedClusterAddOnStatus(ctx context.Context, new *addonapiv1alpha1.ManagedClusterAddOn) {
+	gomega.Eventually(func() bool {
+		old, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Get(ctx, new.Name, metav1.GetOptions{})
+		if err != nil {
+			return false
+		}
+		old.Status = new.Status
+		_, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(old.Namespace).UpdateStatus(ctx, old, metav1.UpdateOptions{})
+		return err == nil
+	}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+}
+
+func assertClusterManagementAddOnDefaultConfigReferences(name string, expect ...addonapiv1alpha1.DefaultConfigReference) {
+	ginkgo.By(fmt.Sprintf("Check ClusterManagementAddOn %s DefaultConfigReferences", name))
+
+	gomega.Eventually(func() error {
+		actual, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+
+		if len(actual.Status.DefaultConfigReferences) != len(expect) {
+			return fmt.Errorf("Expected %v default config reference, actual: %v", len(expect), len(actual.Status.DefaultConfigReferences))
+		}
+
+		for i, e := range expect {
+			actualConfigReference := actual.Status.DefaultConfigReferences[i]
+
+			if !apiequality.Semantic.DeepEqual(actualConfigReference, e) {
+				return fmt.Errorf("Expected default config is %v, actual: %v", e, actualConfigReference)
+			}
+		}
+
+		return nil
+	}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+}
+
+func assertClusterManagementAddOnInstallProgression(name string, expect ...addonapiv1alpha1.InstallProgression) {
+	ginkgo.By(fmt.Sprintf("Check ClusterManagementAddOn %s InstallProgression", name))
+
+	gomega.Eventually(func() error {
+		actual, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+
+		if len(actual.Status.InstallProgressions) != len(expect) {
+			return fmt.Errorf("Expected %v install progression, actual: %v", len(expect), len(actual.Status.InstallProgressions))
+		}
+
+		for i, e := range expect {
+			actualInstallProgression := actual.Status.InstallProgressions[i]
+
+			if
!apiequality.Semantic.DeepEqual(actualInstallProgression.ConfigReferences, e.ConfigReferences) { + return fmt.Errorf("Expected InstallProgression.ConfigReferences is %v, actual: %v", e.ConfigReferences, actualInstallProgression.ConfigReferences) + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) +} + +func assertClusterManagementAddOnConditions(name string, expect ...metav1.Condition) { + ginkgo.By(fmt.Sprintf("Check ClusterManagementAddOn %s Conditions", name)) + + gomega.Eventually(func() error { + actual, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + for i, ec := range expect { + cond := meta.FindStatusCondition(actual.Status.InstallProgressions[i].Conditions, ec.Type) + if cond == nil || + cond.Status != ec.Status || + cond.Reason != ec.Reason || + cond.Message != ec.Message { + return fmt.Errorf("Expected cma progressing condition is %v, actual: %v", ec, cond) + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) +} + +func assertManagedClusterAddOnConfigReferences(name, namespace string, expect ...addonapiv1alpha1.ConfigReference) { + ginkgo.By(fmt.Sprintf("Check ManagedClusterAddOn %s/%s ConfigReferences", namespace, name)) + + gomega.Eventually(func() error { + actual, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(actual.Status.ConfigReferences) != len(expect) { + return fmt.Errorf("Expected %v config reference, actual: %v", len(expect), len(actual.Status.ConfigReferences)) + } + + for i, e := range expect { + actualConfigReference := actual.Status.ConfigReferences[i] + + if !apiequality.Semantic.DeepEqual(actualConfigReference, e) { + return fmt.Errorf("Expected mca config reference is %v %v %v, actual: %v %v %v", + e.DesiredConfig, + e.LastAppliedConfig, + e.LastObservedGeneration, + actualConfigReference.DesiredConfig, + actualConfigReference.LastAppliedConfig, + actualConfigReference.LastObservedGeneration, + ) + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) +} + +func assertManagedClusterAddOnConditions(name, namespace string, expect ...metav1.Condition) { + ginkgo.By(fmt.Sprintf("Check ManagedClusterAddOn %s/%s Conditions", namespace, name)) + + gomega.Eventually(func() error { + actual, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, ec := range expect { + cond := meta.FindStatusCondition(actual.Status.Conditions, ec.Type) + if cond == nil || + cond.Status != ec.Status || + cond.Reason != ec.Reason || + cond.Message != ec.Message { + return fmt.Errorf("Expected addon progressing condition is %v, actual: %v", ec, cond) + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) +} diff --git a/test/integration/addon/suite_test.go b/test/integration/addon/suite_test.go new file mode 100644 index 000000000..170ed6dba --- /dev/null +++ b/test/integration/addon/suite_test.go @@ -0,0 +1,183 @@ +package integration + +import ( + "context" + "path/filepath" + "testing" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/openshift/library-go/pkg/controller/controllercmd" + certificatesv1 "k8s.io/api/certificates/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + "open-cluster-management.io/addon-framework/pkg/addonmanager" + "open-cluster-management.io/addon-framework/pkg/agent" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/ocm/pkg/addon" + "open-cluster-management.io/ocm/test/integration/util" +) + +const ( + eventuallyTimeout = 30 // seconds + eventuallyInterval = 1 // seconds +) + +var addOnDeploymentConfigGVR = schema.GroupVersionResource{ + Group: "addon.open-cluster-management.io", + Version: "v1alpha1", + Resource: "addondeploymentconfigs", +} + +var testEnv *envtest.Environment +var hubWorkClient workclientset.Interface +var hubClusterClient clusterv1client.Interface +var hubAddonClient addonv1alpha1client.Interface +var hubKubeClient kubernetes.Interface +var testAddonImpl *testAddon +var testAddOnConfigsImpl *testAddon + +var cancel context.CancelFunc +var mgrContext context.Context +var addonManager addonmanager.AddonManager + +func TestIntegration(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Integration Suite") +} + +var _ = ginkgo.BeforeSuite(func() { + ginkgo.By("bootstrapping test environment") + + // start a kube-apiserver + testEnv = &envtest.Environment{ + ErrorIfCRDPathMissing: true, + CRDDirectoryPaths: []string{ + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "work", "v1", "0000_00_work.open-cluster-management.io_manifestworks.crd.yaml"), + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "cluster", "v1"), + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "cluster", "v1beta1"), + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "addon", "v1alpha1"), + }, + } + + cfg, err := testEnv.Start() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(cfg).ToNot(gomega.BeNil()) + + hubWorkClient, err = workclientset.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + hubClusterClient, err = clusterv1client.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + hubAddonClient, err = addonv1alpha1client.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + hubKubeClient, err = kubernetes.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + testAddonImpl = &testAddon{ + name: "test", + manifests: map[string][]runtime.Object{}, + registrations: map[string][]addonapiv1alpha1.RegistrationConfig{}, + } + + testAddOnConfigsImpl = &testAddon{ + name: "test-addon-configs", + manifests: map[string][]runtime.Object{}, + registrations: map[string][]addonapiv1alpha1.RegistrationConfig{}, + supportedConfigGVRs: []schema.GroupVersionResource{addOnDeploymentConfigGVR}, + } + + mgrContext, cancel = context.WithCancel(context.TODO()) + // start hub controller + go func() { + addonManager, err = addonmanager.New(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = addonManager.AddAgent(testAddonImpl) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = addonManager.AddAgent(testAddOnConfigsImpl) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = addonManager.Start(mgrContext) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = addon.RunManager(mgrContext, &controllercmd.ControllerContext{ + KubeConfig: cfg, + EventRecorder: util.NewIntegrationTestEventRecorder("integration"), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + +}) + +var _ = ginkgo.AfterSuite(func() { + ginkgo.By("tearing down the test environment") + + cancel() + err := testEnv.Stop() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) +}) + +type testAddon struct { + name string + manifests map[string][]runtime.Object + registrations map[string][]addonapiv1alpha1.RegistrationConfig + approveCSR bool + cert []byte + prober *agent.HealthProber + installStrategy *agent.InstallStrategy + hostedModeEnabled bool + supportedConfigGVRs []schema.GroupVersionResource +} + +func (t *testAddon) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + return t.manifests[cluster.Name], nil +} + +func (t *testAddon) GetAgentAddonOptions() agent.AgentAddonOptions { + option := agent.AgentAddonOptions{ + AddonName: t.name, + HealthProber: t.prober, + InstallStrategy: t.installStrategy, + HostedModeEnabled: t.hostedModeEnabled, + SupportedConfigGVRs: t.supportedConfigGVRs, + } + + if len(t.registrations) > 0 { + option.Registration = &agent.RegistrationOption{ + CSRConfigurations: func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig { + return t.registrations[cluster.Name] + }, + CSRApproveCheck: func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, csr *certificatesv1.CertificateSigningRequest) bool { + return t.approveCSR + }, + CSRSign: func(csr *certificatesv1.CertificateSigningRequest) []byte { + return t.cert + }, + } + } + + return option +} + +func newClusterManagementAddon(name string) *addonapiv1alpha1.ClusterManagementAddOn { + return &addonapiv1alpha1.ClusterManagementAddOn{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyManual, + }, + }, + } +} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000..fe79e3add --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000..01b574320 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 000000000..3651cfa96
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,120 @@
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml` packages.
+
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+
+Documentation: https://godocs.io/github.com/BurntSushi/toml
+
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
+
+This library requires Go 1.13 or newer; add it to your go.mod with:
+
+    % go get github.com/BurntSushi/toml@latest
+
+It also comes with a TOML validator CLI tool:
+
+    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+    % tomlv some-toml-file.toml
+
+### Examples
+For the simplest example, consider some TOML file as just a list of keys and
+values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which can be decoded with:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time
+}
+
+var conf Config
+_, err := toml.Decode(tomlData, &conf)
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+	ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that like other decoders **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
+
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses values in a `mail.Address`:
+
+```toml
+contacts = [
+	"Donald Duck <donald@duckburg.com>",
+	"Scrooge McDuck <scrooge@duckburg.com>",
+]
+```
+
+Can be decoded with:
+
+```go
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+	*mail.Address
+}
+
+func (a *address) UnmarshalText(text []byte) error {
+	var err error
+	a.Address, err = mail.ParseAddress(string(text))
+	return err
+}
+
+// Decode it.
+func decode() {
+	blob := `
+		contacts = [
+			"Donald Duck <donald@duckburg.com>",
+			"Scrooge McDuck <scrooge@duckburg.com>",
+		]
+	`
+
+	var contacts struct {
+		Contacts []address
+	}
+
+	_, err := toml.Decode(blob, &contacts)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, c := range contacts.Contacts {
+		fmt.Printf("%#v\n", c.Address)
+	}
+
+	// Output:
+	// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+	// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
+}
+```
+
+To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
+a similar way.
+
+### More complex usage
+See the [`_example/`](/_example) directory for a more complex example.
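A minimal sketch of the `UnmarshalTOML` interface mentioned above, assuming the decoder behavior documented in decode.go below; the `severity` type, its field, and the accepted values are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// severity implements toml.Unmarshaler, so it receives the raw parsed TOML
// value and decides for itself how to interpret it.
type severity struct{ Level int }

func (s *severity) UnmarshalTOML(data interface{}) error {
	switch v := data.(type) {
	case int64: // TOML integers are handed over as int64
		s.Level = int(v)
		return nil
	case string: // also accept a symbolic name
		if v == "high" {
			s.Level = 10
			return nil
		}
		return fmt.Errorf("unknown severity %q", v)
	default:
		return fmt.Errorf("unexpected TOML type %T", data)
	}
}

func main() {
	var cfg struct{ Alert severity }
	if _, err := toml.Decode(`alert = "high"`, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Alert.Level) // prints 10
}
```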
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000..0ca1dc4fe --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,602 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v interface{}) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) + return err +} + +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. +// +// ### Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. 
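+//
+// For example (an illustrative sketch, not actual package code):
+//
+//	type server struct {
+//		Addr string `toml:"listen_addr"` // filled from the key "listen_addr"
+//		Name string                      // filled from "name", "Name", "NAME", ...
+//	}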
+// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. 
+ if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). + + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := md.unify(v, indirect(rvval)) + if err != nil 
{ + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
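+		// e.g. `wait = "1h30m"` and `wait = 5400000000000` produce the same
+		// time.Duration value.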
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +func (md *MetaData) badtype(dst string, data interface{}) error { + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). 
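+// (For example, a settable but nil *string field gets a fresh string
+// allocated, and that addressable string value is returned.)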
+func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { + return true + } + return false +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 000000000..086d0b686 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 000000000..c6af3f239 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,21 @@ +package toml + +import ( + "encoding" + "io" +) + +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000..81a7c0fe9 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,11 @@ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// There is also support for delaying decoding with the Primitive type, and +// querying the set of keys in a TOML document with the MetaData type. +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. 
+package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000..930e1d521 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,750 @@ +package toml + +import ( + "bufio" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var dblQuotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Encoder encodes a Go to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to +// encoding the value as custom TOML. +// +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. +type Encoder struct { + // String to use for a single indentation level; default is two spaces. 
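+	// Setting it to "\t", for instance, indents nested tables with tabs.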
+ Indent string + + w *bufio.Writer + hasWritten bool // written any output to w yet? +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. 
+ enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
+ var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + if is32Bit { + copyStart := make([]int, len(start)) + copy(copyStart, start) + fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
+ continue + } + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitempty && enc.isEmpty(fieldVal) { + continue + } + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if isTableArray(rv) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + default: + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false + } + + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. 
+ if tt == nil { + encPanic(errArrayNilElement) + } + + if ret && !typeEqual(tomlHash, tt) { + ret = false + } + } + return ret +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func (enc *Encoder) isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Struct: + if rv.Type().Comparable() { + return reflect.Zero(rv.Type()).Interface() == rv.Interface() + } + // Need to also check if all the fields are empty, otherwise something + // like this with uncomparable types will always return true: + // + // type a struct{ field b } + // type b struct{ s []string } + // s := a{field: b{s: []string{"AAA"}}} + for i := 0; i < rv.NumField(); i++ { + if !enc.isEmpty(rv.Field(i)) { + return false + } + } + return true + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌───────────────────┐ +// │ ┌───┐ ┌────┐│ +// v v v v vv +// key = {k = 1, k2 = 2} +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + if len(key) == 0 { + encPanic(errNoKey) + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +// Resolve any level of pointers to the actual value (e.g. **string → string). +func eindirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. 
+			if pv := v.Addr(); isMarshaler(pv) {
+				return pv
+			}
+		}
+		return v
+	}
+
+	if v.IsNil() {
+		return v
+	}
+
+	return eindirect(v.Elem())
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
new file mode 100644
index 000000000..f4f390e64
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -0,0 +1,279 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML syntax such as
+// invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using [ErrorWithPosition]:
+//
+//	toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+//	At line 4, column 2-7:
+//
+//	      2 | fruit = []
+//	      3 |
+//	      4 | [[fruit]] # Not allowed
+//	          ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+//	toml: error: newlines not allowed within inline tables
+//
+//	At line 1, column 18:
+//
+//	      1 | x = [{ key = 42 #
+//	                          ^
+//
+//	Error help:
+//
+//	  Inline tables must always be on a single line:
+//
+//	      table = {key = 42, second = 43}
+//
+//	  It is invalid to split them over multiple lines like so:
+//
+//	      # INVALID
+//	      table = {
+//	          key    = 42,
+//	          second = 43
+//	      }
+//
+//	  Use regular tables for this:
+//
+//	      [table]
+//	      key    = 42
+//	      second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error
+	LastKey  string   // Last parsed key, may be blank.
+
+	// Line the error occurred.
+	//
+	// Deprecated: use [Position].
+	Line int
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
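+	//
+	// (Editor's gloss: the two branches below differ only in how the column
+	// span is printed; a one-byte error prints "column N", anything longer
+	// prints "column N-M".)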
+ + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + } + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage() returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case. + col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. 
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+	table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+	# INVALID
+	table = {
+		key    = 42,
+		second = 43
+	}
+
+Use regular tables for this:
+
+	[table]
+	key    = 42
+	second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+	# INVALID
+	string = "Hello,
+	world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+	string = """Hello,
+	world!"""
+`
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be
+a bug in the program that uses too small of an integer.
+
+The maximum and minimum values are:
+
+	size   │ lowest         │ highest
+	───────┼────────────────┼──────────────
+	int8   │ -128           │ 127
+	int16  │ -32,768        │ 32,767
+	int32  │ -2,147,483,648 │ 2,147,483,647
+	int64  │ -9.2 × 10¹⁷    │ 9.2 × 10¹⁷
+	uint8  │ 0              │ 255
+	uint16 │ 0              │ 65535
+	uint32 │ 0              │ 4294967295
+	uint64 │ 0              │ 1.8 × 10¹⁸
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageDuration = `
+A duration must be as "number<unit>", without any spaces. Valid units are:
+
+	ns      nanoseconds (billionth of a second)
+	us, µs  microseconds (millionth of a second)
+	ms      milliseconds (thousandth of a second)
+	s       seconds
+	m       minutes
+	h       hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by setting these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
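+//
+// Illustrative example (editor's note, not upstream documentation): a TOML
+// local date such as 1979-05-27 decodes to a time.Time whose Location() is
+// LocalDate, which is what the encoder later checks in order to write it back
+// without a time-of-day or UTC offset.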
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000..d4d70871d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1233 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+ if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. 
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. 
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected key separator '='")
+	case r == '.':
+		lx.ignore()
+		return lexKeyNameStart
+	case r == '=':
+		lx.emit(itemKeyEnd)
+		return lexSkip(lx, lexValue)
+	default:
+		return lx.errorf("expected '.' or '=', but got %q instead", r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+ r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
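+// For example, in '{ a = 1, b = 2 }' this state is entered once after the '{'
+// and again after each ','. (Editor's gloss.)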
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. 
+ lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning ''' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. 
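+//
+// For example, '0x2f' and '0o755' are base-prefixed integers, while '0.5' is
+// a float and '07:32:00' a local time; all reach this state through their
+// leading '0'. (Editor's gloss; the forms follow from the handlers below.)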
+func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. 
It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 000000000..71847a041 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,121 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]interface{} + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. 
+// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000..d2542d6f9 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,781 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]interface{} // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. 
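+	// (Editor's gloss: "\xff\xfe" and "\xfe\xff" are the UTF-16 byte-order
+	// marks; the NULL-byte check just below inspects the first few bytes and
+	// catches UTF-16 content that slips past this prefix check.)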
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]interface{}), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. 
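+		/// For example, in 'a.b.c = 1' the parts 'a' and 'a.b' become implicit tables.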
+ context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. 
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. 
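+	/// Pairs are read until the closing '}' (itemInlineTableEnd) is reached.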
+	for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+		if it.typ == itemCommentStart {
+			p.expect(itemText)
+			continue
+		}
+
+		/// Read all key parts.
+		k := p.nextPos()
+		var key Key
+		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+			key = append(key, p.keyString(k))
+		}
+		p.assertEqual(itemKeyEnd, k.typ)
+
+		/// The current key is the last part.
+		p.currentKey = key[len(key)-1]
+
+		/// All the other parts (if any) are the context; need to set each part
+		/// as implicit.
+		context := key[:len(key)-1]
+		for i := range context {
+			p.addImplicitContext(append(p.context, context[i:i+1]...))
+		}
+
+		/// Set the value.
+		val, typ := p.value(p.next(), false)
+		p.set(p.currentKey, val, typ, it.pos)
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+		hash[p.currentKey] = val
+
+		/// Restore context.
+		p.context = prevContext
+	}
+	p.context = outerContext
+	p.currentKey = outerKey
+	return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+	if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
+		return true
+	}
+	if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+		return true
+	}
+	return false
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
+	accept := false
+	for _, r := range s {
+		if r == '_' {
+			if !accept {
+				return false
+			}
+		}
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
+	}
+	return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
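+		// (The capacity of 4 is just a small starting size; the slice grows
+		// as more [[table]] entries are appended.)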
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 4)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as an array.", key)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
+	p.setValue(key, val)
+	p.setType(key, typ, pos)
+
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, and account for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+	var (
+		tmpHash    interface{}
+		ok         bool
+		hash       = p.mapping
+		keyContext Key
+	)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+		if tmpHash, ok = hash[k]; !ok {
+			p.bug("Context for key '%s' has not been established.", keyContext)
+		}
+		switch t := tmpHash.(type) {
+		case []map[string]interface{}:
+			// The context is a table of hashes. Pick the most recent table
+			// defined as the current hash.
+			hash = t[len(t)-1]
+		case map[string]interface{}:
+			hash = t
+		default:
+			p.panicf("Key '%s' has already been defined.", keyContext)
+		}
+	}
+	keyContext = append(keyContext, key)
+
+	if _, ok := hash[key]; ok {
+		// Normally redefining keys isn't allowed, but the key could have been
+		// defined implicitly and it's allowed to be redefined concretely. (See
+		// the `valid/implicit-and-explicit-after.toml` in toml-test)
+		//
+		// But we have to make sure to stop marking it as implicit. (So that
+		// another redefinition provokes an error.)
+		//
+		// Note that since it has already been defined (as a hash), we don't
+		// want to overwrite it. So our business is done.
+		if p.isArray(keyContext) {
+			p.removeImplicit(keyContext)
+			hash[key] = value
+			return
+		}
+		if p.isImplicit(keyContext) {
+			p.removeImplicit(keyContext)
+			return
+		}
+
+		// Otherwise, we have a concrete key trying to override a previous
+		// key, which is *always* wrong.
+		p.panicf("Key '%s' has already been defined.", keyContext)
+	}
+
+	hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key. It should be
+// called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType, pos Position) {
+	keyContext := make(Key, 0, len(p.context)+1)
+	keyContext = append(keyContext, p.context...)
+	if len(key) > 0 { // allow type setting for hashes
+		keyContext = append(keyContext, key)
+	}
+	// Special case to make empty keys ("" = 1) work.
+	// Without it, it will set "" rather than `""`.
+	// TODO: why is this needed? And why is this only needed here?
+	if len(keyContext) == 0 {
+		keyContext = Key{""}
+	}
+	p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
+}
+
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
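+// Once a key is defined explicitly, setValue removes it from this set again,
+// so a later explicit redefinition is reported as a duplicate key.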
+func (p *parser) addImplicit(key Key)        { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key)     { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool    { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool       { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+	p.addImplicit(key)
+	p.addContext(key, false)
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+	if len(s) > 0 && s[0] == '\n' {
+		return s[1:]
+	}
+	if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+		return s[2:]
+	}
+	return s
+}
+
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func (p *parser) stripEscapedNewlines(s string) string {
+	split := strings.Split(s, "\n")
+	if len(split) < 1 {
+		return s
+	}
+
+	escNL := false // Keep track of whether the last non-blank line was escaped.
+	for i, line := range split {
+		line = strings.TrimRight(line, " \t\r")
+
+		if len(line) == 0 || line[len(line)-1] != '\\' {
+			split[i] = strings.TrimRight(split[i], "\r")
+			if !escNL && i != len(split)-1 {
+				split[i] += "\n"
+			}
+			continue
+		}
+
+		escBS := true
+		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+			escBS = !escBS
+		}
+		if escNL {
+			line = strings.TrimLeft(line, " \t\r")
+		}
+		escNL = !escBS
+
+		if escBS {
+			split[i] += "\n"
+			continue
+		}
+
+		if i == len(split)-1 {
+			p.panicf("invalid escape: '\\ '")
+		}
+
+		split[i] = line[:len(line)-1] // Remove \
+		if len(split)-1 > i {
+			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+		}
+	}
+	return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(it item, str string) string {
+	replaced := make([]rune, 0, len(str))
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+		case ' ', '\t':
+			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+ escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000..254ca82e5 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
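+			// Unexported fields and fields tagged `toml:"-"` are skipped;
+			// anonymous struct fields are queued for the next BFS level.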
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
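+// Concurrent callers may occasionally duplicate the computation, which is
+// harmless; the cache is purely an optimization.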
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 000000000..4e90d7737 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. 
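+// (Multiline and raw string items are also accepted; they map to tomlString.)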
+func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 000000000..4025e01ec --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 000000000..d700ec47f --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 000000000..163ffe72a --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes:
+* WordUtils
+* RandomStringUtils
+* StringUtils (partial implementation)
+
+## Installation
+If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this:
+
+    go get github.com/Masterminds/goutils
+
+If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
+
+
+## Documentation
+GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils)
+
+
+## Usage
+The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 1: A goutils function which returns no errors
+        fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+    }
+Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 2: A goutils function which returns an error
+        rand1, err1 := goutils.Random (-1, 0, 0, true, true)
+
+        if err1 != nil {
+            fmt.Println(err1) // Prints out the error message because -1 was entered as the first parameter in goutils.Random(...)
+        } else {
+            fmt.Println(rand1)
+        }
+
+    }
+
+## License
+GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
+
+## Issue Reporting
+Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues
+
+## Website
+* [GoUtils webpage](http://Masterminds.github.io/goutils/)
diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml
new file mode 100644
index 000000000..657564a84
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/appveyor.yml
@@ -0,0 +1,21 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\goutils
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+build: off
+
+install:
+  - go version
+  - go env
+
+test_script:
+  - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
new file mode 100644
index 000000000..8dbd92485
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"unicode"
+)
+
+/*
+CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNonAlphaNumeric(count int) (string, error) {
+	return CryptoRandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+CryptoRandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAscii(count int) (string, error) {
+	return CryptoRandom(count, 32, 127, false, false)
+}
+
+/*
+CryptoRandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNumeric(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, false, true)
+}
+
+/*
+CryptoRandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphabetic(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, true, false)
+}
+
+/*
+CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumeric(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, true, true)
+}
+
+/*
+CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+    count - the length of random string to create
+    letters - if true, generated string may include alphabetic characters
+    numbers - if true, generated string may include numeric characters
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
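+
+Example (illustrative only; actual output is random):
+    s, _ := CryptoRandomAlphaNumericCustom(8, true, true) // e.g. "a1B2c3D4"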
+*/
+func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+	return CryptoRandom(count, 0, 0, letters, numbers)
+}
+
+/*
+CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness.
+If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, so the ASCII printable characters will be used,
+unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+
+Parameters:
+    count - the length of random string to create
+    start - the position in set of chars (ASCII/Unicode int) to start at
+    end - the position in set of chars (ASCII/Unicode int) to end before
+    letters - if true, generated string may include alphabetic characters
+    numbers - if true, generated string may include numeric characters
+    chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+    string - the random string
+    error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
+*/
+func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+	if count == 0 {
+		return "", nil
+	} else if count < 0 {
+		err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
+		return "", err
+	}
+	if chars != nil && len(chars) == 0 {
+		err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
+		return "", err
+	}
+
+	if start == 0 && end == 0 {
+		if chars != nil {
+			end = len(chars)
+		} else {
+			if !letters && !numbers {
+				end = math.MaxInt32
+			} else {
+				end = 'z' + 1
+				start = ' '
+			}
+		}
+	} else {
+		if end <= start {
+			err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
+			return "", err
+		}
+
+		if chars != nil && end > len(chars) {
+			err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
+			return "", err
+		}
+	}
+
+	buffer := make([]rune, count)
+	gap := end - start
+
+	// high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
+	// low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
+
+	for count != 0 {
+		count--
+		var ch rune
+		if chars == nil {
+			ch = rune(getCryptoRandomInt(gap) + int64(start))
+		} else {
+			ch = chars[getCryptoRandomInt(gap)+int64(start)]
+		}
+
+		if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
+			if ch >= 56320 && ch <= 57343 { // low surrogate range
+				if count == 0 {
+					count++
+				} else {
+					// Insert low surrogate
+					buffer[count] = ch
+					count--
+					// Insert high surrogate
+					buffer[count] = rune(55296 + getCryptoRandomInt(128))
+				}
+			} else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
+				if count == 0 {
+					count++
+				} else {
+					// Insert low surrogate
+					buffer[count] = rune(56320 + getCryptoRandomInt(128))
+					count--
+					// Insert high surrogate
+					buffer[count] = ch
+				}
+			} else if ch >= 56192 && ch <= 56319 {
+				// private high surrogate, skip it
+				count++
+			} else {
+				// not one of the surrogates*
+				buffer[count] = ch
+			}
+		} else {
+			count++
+		}
+	}
+	return string(buffer), nil
+}
+
+func getCryptoRandomInt(count int) int64 {
+	nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count)))
+	if err != nil {
+		panic(err)
+	}
+	return nBig.Int64()
+}
diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go
new file mode 100644
index 000000000..272670231
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+	"unicode"
+)
+
+// RANDOM is a *rand.Rand instance seeded with the current time; it is used as
+// the default source when generating random numbers.
+var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+/*
+RandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNonAlphaNumeric(count int) (string, error) {
+	return RandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+RandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAscii(count int) (string, error) {
+	return Random(count, 32, 127, false, false)
+}
+
+/*
+RandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNumeric(count int) (string, error) {
+	return Random(count, 0, 0, false, true)
+}
+
+/*
+RandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphabetic(count int) (string, error) {
+	return Random(count, 0, 0, true, false)
+}
+
+/*
+RandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+    count - the length of random string to create
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumeric(count int) (string, error) {
+	return Random(count, 0, 0, true, true)
+}
+
+/*
+RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+    count - the length of random string to create
+    letters - if true, generated string may include alphabetic characters
+    numbers - if true, generated string may include numeric characters
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+	return Random(count, 0, 0, letters, numbers)
+}
+
+/*
+Random creates a random string based on a variety of options, using the default source of randomness.
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []rune, *rand.Rand), but
+instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
+
+Parameters:
+    count - the length of random string to create
+    start - the position in set of chars (ASCII/Unicode int) to start at
+    end - the position in set of chars (ASCII/Unicode int) to end before
+    letters - if true, generated string may include alphabetic characters
+    numbers - if true, generated string may include numeric characters
+    chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+    string - the random string
+    error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+	return RandomSeed(count, start, end, letters, numbers, chars, RANDOM)
+}
+
+/*
+RandomSeed creates a random string based on a variety of options, using a supplied source of randomness.
+If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, so the ASCII printable characters will be used,
+unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
+with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
+
+Parameters:
+    count - the length of random string to create
+    start - the position in set of chars (ASCII/Unicode decimals) to start at
+    end - the position in set of chars (ASCII/Unicode decimals) to end before
+    letters - if true, generated string may include alphabetic characters
+    numbers - if true, generated string may include numeric characters
+    chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+    random - a source of randomness.
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 000000000..741bb530e --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
+ +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
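+
+For example (illustrative):
+
+	goutils.IndexOfDifference("abcde", "abxyz") // 2
+	goutils.IndexOfDifference("abc", "abc")     // -1 (INDEX_NOT_FOUND)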
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// Returns either the passed in string, or if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. +func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 000000000..034cad8e2 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. 
+The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
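+For example (illustrative), Capitalize("hello world") returns "Hello World".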
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
+The delimiters represent a set of characters understood to separate words. The first string character
+and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+	str - the string to capitalize
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
+
+Returns:
+	capitalized string
+*/
+func Capitalize(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+
+	buffer := []rune(str)
+	capitalizeNext := true
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if isDelimiter(ch, delimiters...) {
+			capitalizeNext = true
+		} else if capitalizeNext {
+			buffer[i] = unicode.ToTitle(ch)
+			capitalizeNext = false
+		}
+	}
+	return string(buffer)
+
+}
+
+/*
+CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
+titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
+to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+	str - the string to capitalize fully
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
+
+Returns:
+	capitalized string
+*/
+func CapitalizeFully(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+	str = strings.ToLower(str)
+	return Capitalize(str, delimiters...)
+}
+
+/*
+Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
+The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
+character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+	str - the string to uncapitalize fully
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
+
+Returns:
+	uncapitalized string
+*/
+func Uncapitalize(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+
+	buffer := []rune(str)
+	uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if isDelimiter(ch, delimiters...) {
+			uncapitalizeNext = true
+		} else if uncapitalizeNext {
+			buffer[i] = unicode.ToLower(ch)
+			uncapitalizeNext = false
+		}
+	}
+	return string(buffer)
+}
+
+/*
+SwapCase swaps the case of a string using a word based algorithm.
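+For example (illustrative), SwapCase("Hello World") returns "hELLO wORLD".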
+
+Conversion algorithm:
+
+	Upper case character converts to Lower case
+	Title case character converts to Lower case
+	Lower case character after Whitespace or at start converts to Title case
+	Other Lower case character converts to Upper case
+	Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+	str - the string to swap case
+
+Returns:
+	the changed string
+*/
+func SwapCase(str string) string {
+	if str == "" {
+		return str
+	}
+	buffer := []rune(str)
+
+	whitespace := true
+
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if unicode.IsUpper(ch) {
+			buffer[i] = unicode.ToLower(ch)
+			whitespace = false
+		} else if unicode.IsTitle(ch) {
+			buffer[i] = unicode.ToLower(ch)
+			whitespace = false
+		} else if unicode.IsLower(ch) {
+			if whitespace {
+				buffer[i] = unicode.ToTitle(ch)
+				whitespace = false
+			} else {
+				buffer[i] = unicode.ToUpper(ch)
+			}
+		} else {
+			whitespace = unicode.IsSpace(ch)
+		}
+	}
+	return string(buffer)
+}
+
+/*
+Initials extracts the initial letters from each word in the string. The first letter of the string and all first
+letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
+parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
+
+Parameters:
+	str - the string to get initials from
+	delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter
+Returns:
+	string of initial letters
+*/
+func Initials(str string, delimiters ...rune) string {
+	if str == "" {
+		return str
+	}
+	if delimiters != nil && len(delimiters) == 0 {
+		return ""
+	}
+	strLen := len(str)
+	var buf bytes.Buffer
+	lastWasGap := true
+	for i := 0; i < strLen; i++ {
+		ch := rune(str[i])
+
+		if isDelimiter(ch, delimiters...)
{ + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 000000000..6b061e617 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 000000000..c87d1c4b9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,30 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - structcheck + - govet + - staticcheck + - deadcode + - errcheck + - varcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 000000000..f12626423 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,214 @@ +# Changelog + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. 
For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. 
+ +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 000000000..9ff7da9c4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 000000000..eac19178f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. 
The configuration for it is in the .golangci.yml
+	# file in the root of the repository
+	echo ${GOPATH}
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+
+$(GOFUZZBUILD):
+	cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
+
+$(GOFUZZ):
+	cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 000000000..d8f54dcbd
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,244 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the new stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the most widely used version with numerous tagged releases. This is the
+  previous stable and is still maintained for bug fixes. The development, to fix
+  bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        t.Errorf("Error parsing version: %s", err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The a variable will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
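+
+A minimal sketch of checking such a compound constraint (versions here are
+hypothetical):
+
+```go
+c, err := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+fmt.Println(c.Check(semver.MustParse("2.5.0"))) // true, via the first branch
+fmt.Println(c.Check(semver.MustParse("3.1.0"))) // false, matches neither branch
+fmt.Println(c.Check(semver.MustParse("4.3.0"))) // true, via the second branch
+```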
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. 
For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 000000000..a78235895 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 000000000..203072e46 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,594 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. 
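+	// For example (illustrative), "1.2 - 1.4.5" becomes ">= 1.2, <= 1.4.5"
+	// before the string is split on || below.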
+	c = rewriteRange(c)
+
+	ors := strings.Split(c, "||")
+	or := make([][]*constraint, len(ors))
+	for k, v := range ors {
+
+		// TODO: Find a way to validate and fetch all the constraints in a simpler form
+
+		// Validate the segment
+		if !validConstraintRegex.MatchString(v) {
+			return nil, fmt.Errorf("improper constraint: %s", v)
+		}
+
+		cs := findConstraintRegex.FindAllString(v, -1)
+		if cs == nil {
+			cs = append(cs, v)
+		}
+		result := make([]*constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				return nil, err
+			}
+
+			result[i] = pc
+		}
+		or[k] = result
+	}
+
+	o := &Constraints{constraints: or}
+	return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+	// functions as the underlying functions make that possible now.
+	// loop over the ORs and check the inner ANDs
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			if check, _ := c.check(v); !check {
+				joy = false
+				break
+			}
+		}
+
+		if joy {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Validate checks if a version satisfies a constraint. If not, a slice of
+// reasons for the failure is returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+	// loop over the ORs and check the inner ANDs
+	var e []error
+
+	// Capture the prerelease message only once. When it happens the first time
+	// this var is marked
+	var prerelease bool
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			// Before running the check handle the case where the version is
+			// a prerelease and the check is not searching for prereleases.
+			if c.con.pre == "" && v.pre != "" {
+				if !prerelease {
+					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+					e = append(e, em)
+					prerelease = true
+				}
+				joy = false
+
+			} else {
+
+				if _, err := c.check(v); err != nil {
+					e = append(e, err)
+					joy = false
+				}
+			}
+		}
+
+		if joy {
+			return true, []error{}
+		}
+	}
+
+	return false, e
+}
+
+func (cs Constraints) String() string {
+	buf := make([]string, len(cs.constraints))
+	var tmp bytes.Buffer
+
+	for k, v := range cs.constraints {
+		tmp.Reset()
+		vlen := len(v)
+		for kk, c := range v {
+			tmp.WriteString(c.string())
+
+			// Space separate the AND conditions
+			if vlen > 1 && kk < vlen-1 {
+				tmp.WriteString(" ")
+			}
+		}
+		buf[k] = tmp.String()
+	}
+
+	return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+	temp, err := NewConstraint(string(text))
+	if err != nil {
+		return err
+	}
+
+	*cs = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
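+			// (Illustrative note: m[3], m[4] and m[5] capture the major, .minor and
+			// .patch pieces and m[6] any -prerelease suffix, so ver above is always
+			// a parseable form.)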
+ return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. 
+ eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. 
Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. 
Expected version and constraint to be equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 000000000..74f97caa5
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+ - Parse semantic versions
+ - Sort semantic versions
+ - Check if a semantic version fits within a set of constraints
+ - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version, an error is returned if there is an issue parsing the
+version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			panic(err) // handle the malformed version
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer that is valid with the
+    comparison section of the spec at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range (a short sketch follows
+    this list).
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
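+
+For instance, here is a brief illustrative sketch of how the two approaches
+treat a prerelease differently:
+
+	v1, _ := semver.NewVersion("1.2.3-beta.1")
+	v2, _ := semver.NewVersion("1.2.3")
+
+	// Comparison methods always consider prereleases (spec item 11).
+	fmt.Println(v1.LessThan(v2)) // true
+
+	// Constraint checks skip prereleases unless the range opts in with -0.
+	c1, _ := semver.NewConstraint(">= 1.0.0")
+	fmt.Println(c1.Check(v1)) // false
+	c2, _ := semver.NewConstraint(">= 1.0.0-0")
+	fmt.Println(c2.Check(v1)) // true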
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+ - `=`: equal (aliased to no operator)
+ - `!=`: not equal
+ - `>`: greater than
+ - `<`: less than
+ - `>=`: greater than or equal to
+ - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+ - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+ - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `>= 1.2.x` is equivalent to `>= 1.2.0`
+ - `<= 2.x` is equivalent to `< 3`
+ - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+ - `~1` is equivalent to `>= 1, < 2`
+ - `~2.3` is equivalent to `>= 2.3 < 2.4`
+ - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful for comparing API versions, since a
+major change is API breaking.
For example,
+
+ - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ - `^2.3` is equivalent to `>= 2.3, < 3`
+ - `^2.x` is equivalent to `>= 2.0.0, < 3`
+ - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+ - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+ - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+ - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+ - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
new file mode 100644
index 000000000..a242ad705
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+	d := string(data)
+
+	// Test NewVersion
+	_, _ = NewVersion(d)
+
+	// Test StrictNewVersion
+	_, _ = StrictNewVersion(d)
+
+	// Test NewConstraint
+	_, _ = NewConstraint(d)
+
+	// The return value should be 0 normally, 1 if the priority in future tests
+	// should be increased, and -1 if future tests should skip passing in that
+	// data. We do not have a reason to change priority, so 0 is always returned.
+	return 0
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 000000000..7c4bed334
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,639 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// during parsing.
+	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version.
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is in an invalid format.
+	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is in an invalid format.
+	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). 
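+//
+// For example, NewVersion("v1.2") is coerced to the version "1.2.0", while
+// StrictNewVersion("v1.2") returns an error because the input is not a
+// complete three-part semantic version.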
+func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. 
+// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. 
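+	// (Build metadata is never consulted here, and the prerelease identifiers
+	// below are only compared once the major/minor/patch numbers all match.)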
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the sql.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their part. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them.
According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 000000000..5e3002f88 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 000000000..2ce45dd4e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+  use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds alder32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that is used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 000000000..f311b1eaa --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 000000000..78d409cde --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md new file mode 100644 index 000000000..3e22c60e1 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/README.md @@ -0,0 +1,100 @@ +# Sprig: Template functions for Go templates + +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. Sprig is a library that provides more than 100 commonly +used template functions. + +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## IMPORTANT NOTES + +Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In +its v0.3.9 release, there was a behavior change that impacts merging template +functions in sprig. It is currently recommended to use v0.3.10 or later of that package. +Using v0.3.9 will cause sprig tests to fail. + +## Package Versions + +There are two active major versions of the `sprig` package. + +* v3 is currently stable release series on the `master` branch. The Go API should + remain compatible with v2, the current stable version. Behavior change behind + some functions is the reason for the new major version. +* v2 is the previous stable release series. It has been more than three years since + the initial release of v2. You can read the documentation and see the code + on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. + Bug fixes to this major version will continue for some time. + +## Usage + +**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig/v3" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. 
The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 000000000..13a5cd559 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + 
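+	// Template legend (resolved via templateCharacters below): 'C'/'c' pick an
+	// upper/lower-case consonant, 'V'/'v' a vowel, 'n' a digit, 'o' a symbol,
+	// and 'a'/'A'/'x' broader alphanumeric classes.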
"basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go new file mode 100644 index 000000000..ed022ddac --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := 
duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 000000000..b9f979666 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. 
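// A minimal runnable sketch of how the defaults helpers above behave once
// installed through sprig.TxtFuncMap(); the driver program and names below
// are illustrative, not part of the vendored file.
//
//	package main
//
//	import (
//		"os"
//		"text/template"
//
//		sprig "github.com/Masterminds/sprig/v3"
//	)
//
//	func main() {
//		// "" and 0 count as empty, so `default` and `coalesce` skip them.
//		src := `{{ "" | default "fallback" }} {{ coalesce "" 0 "first" }} {{ all 1 2 }}`
//		t := template.Must(template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(src))
//		_ = t.Execute(os.Stdout, nil) // prints: fallback first true
//	}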
+func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
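// Illustrative template fragments for the JSON helpers above and ternary
// below, evaluated with a sprig func map installed as in the earlier sketch;
// the inputs are examples only.
//
//	{{ ternary "on" "off" true }}    -> on
//	{{ toJson (dict "a" 1) }}        -> {"a":1}
//	{{ fromJson `{"b":2}` }}         -> map[b:2]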
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 000000000..ade889698 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go new file mode 100644 index 000000000..aabb9d448 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 000000000..57fcec1d9 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,382 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" + "github.com/shopspring/decimal" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
+func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 000000000..ca0fbb789 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go new file mode 100644 index 000000000..108d78a94 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 000000000..f68e4182e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts 
integer types to 64-bit integers +func toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathmetical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go new file mode 100644 index 000000000..8a65c132f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
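// Quick illustrative outputs for the numeric helpers above and the
// reflection helpers below; the literals are examples only.
//
//	{{ add 1 2 3 }}          -> 6
//	{{ round 3.14159 2 }}    -> 3.14
//	{{ seq 1 5 }}            -> 1 2 3 4 5
//	{{ untilStep 0 10 2 }}   -> [0 2 4 6 8]
//	{{ typeOf 1.0 }}         -> float64
//	{{ kindIs "int" 42 }}    -> true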
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 000000000..fab551018 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 000000000..3fbe08aa6 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 000000000..e0ae628c8 --- /dev/null +++ 
b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
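// Illustrative fragments for the string helpers in this file; note the
// flipped argument order (the string comes last) so values pipe cleanly,
// per the "Switch order" comments in functions.go.
//
//	{{ "hello world" | trunc 5 }}     -> hello
//	{{ "hello" | repeat 2 }}          -> hellohello
//	{{ substr 0 5 "hello world" }}    -> hello
//	{{ splitn "/" 2 "a/b/c" }}        -> map[_0:a _1:b/c]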
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 000000000..b8e120e19 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore new file mode 100644 index 000000000..8d69a9418 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.gitignore @@ -0,0 +1,15 @@ +bin/ +.idea/ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml index e29f8eef5..17c4d0a71 100644 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ b/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -1,14 +1,18 @@ +dist: bionic language: go +env: GO111MODULE=on GOFLAGS='-mod vendor' +install: true +email: false go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 + - 1.10 + - 1.11 + - 1.12 + - 1.13 - tip -notifications: - email: - - bwatas@gmail.com +before_script: + - go install github.com/golangci/golangci-lint/cmd/golangci-lint +script: + - golangci-lint run # run a bunch of code checkers/linters in parallel + - go test -v -race ./... 
# Run all the tests with the race detector enabled diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md index f0f7e3a8a..7ed268a1e 100644 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md @@ -11,7 +11,7 @@ If you don't know what to do, there are some features and functions that need to - [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) - [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new - [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) +- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) - [ ] Implement fuzzing testing - [ ] Implement some struct/map/array utilities - [ ] Implement map/array validation @@ -37,7 +37,7 @@ Anyone can file an expense. If the expense makes sense for the development of th ### Contributors Thank you to all the people who have already contributed to govalidator! - + ### Backers diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md index 40f9a8781..78f999e83 100644 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -13,7 +13,7 @@ Type the following command in your terminal: or you can get specified release of the package with `gopkg.in`: - go get gopkg.in/asaskevich/govalidator.v4 + go get gopkg.in/asaskevich/govalidator.v10 After it the package is ready to use. @@ -83,14 +83,14 @@ This was changed to prevent data races when accessing custom validators. import "github.com/asaskevich/govalidator" // before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool { +govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { // ... -}) +} // after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { // ... 
-})) +}) ``` #### List of functions: @@ -108,23 +108,34 @@ func Filter(array []interface{}, iterator ConditionIterator) []interface{} func Find(array []interface{}, iterator ConditionIterator) interface{} func GetLine(s string, index int) (string, error) func GetLines(s string) []string -func InRange(value, left, right float64) bool +func HasLowerCase(str string) bool +func HasUpperCase(str string) bool +func HasWhitespace(str string) bool +func HasWhitespaceOnly(str string) bool +func InRange(value interface{}, left interface{}, right interface{}) bool +func InRangeFloat32(value, left, right float32) bool +func InRangeFloat64(value, left, right float64) bool +func InRangeInt(value, left, right interface{}) bool func IsASCII(str string) bool func IsAlpha(str string) bool func IsAlphanumeric(str string) bool func IsBase64(str string) bool func IsByteLength(str string, min, max int) bool func IsCIDR(str string) bool +func IsCRC32(str string) bool +func IsCRC32b(str string) bool func IsCreditCard(str string) bool func IsDNSName(str string) bool func IsDataURI(str string) bool func IsDialString(str string) bool func IsDivisibleBy(str, num string) bool func IsEmail(str string) bool +func IsExistingEmail(email string) bool func IsFilePath(str string) (bool, int) func IsFloat(str string) bool func IsFullWidth(str string) bool func IsHalfWidth(str string) bool +func IsHash(str string, algorithm string) bool func IsHexadecimal(str string) bool func IsHexcolor(str string) bool func IsHost(str string) bool @@ -136,22 +147,27 @@ func IsISBN10(str string) bool func IsISBN13(str string) bool func IsISO3166Alpha2(str string) bool func IsISO3166Alpha3(str string) bool +func IsISO4217(str string) bool func IsISO693Alpha2(str string) bool func IsISO693Alpha3b(str string) bool -func IsISO4217(str string) bool func IsIn(str string, params ...string) bool +func IsInRaw(str string, params ...string) bool func IsInt(str string) bool func IsJSON(str string) bool func IsLatitude(str string) bool func IsLongitude(str string) bool func IsLowerCase(str string) bool func IsMAC(str string) bool +func IsMD4(str string) bool +func IsMD5(str string) bool +func IsMagnetURI(str string) bool func IsMongoID(str string) bool func IsMultibyte(str string) bool func IsNatural(value float64) bool func IsNegative(value float64) bool func IsNonNegative(value float64) bool func IsNonPositive(value float64) bool +func IsNotNull(str string) bool func IsNull(str string) bool func IsNumeric(str string) bool func IsPort(str string) bool @@ -162,9 +178,21 @@ func IsRFC3339WithoutZone(str string) bool func IsRGBcolor(str string) bool func IsRequestURI(rawurl string) bool func IsRequestURL(rawurl string) bool +func IsRipeMD128(str string) bool +func IsRipeMD160(str string) bool +func IsRsaPub(str string, params ...string) bool +func IsRsaPublicKey(str string, keylen int) bool +func IsSHA1(str string) bool +func IsSHA256(str string) bool +func IsSHA384(str string) bool +func IsSHA512(str string) bool func IsSSN(str string) bool func IsSemver(str string) bool +func IsTiger128(str string) bool +func IsTiger160(str string) bool +func IsTiger192(str string) bool func IsTime(str string, format string) bool +func IsType(v interface{}, params ...string) bool func IsURL(str string) bool func IsUTFDigit(str string) bool func IsUTFLetter(str string) bool @@ -174,16 +202,20 @@ func IsUUID(str string) bool func IsUUIDv3(str string) bool func IsUUIDv4(str string) bool func IsUUIDv5(str string) bool +func IsUnixTime(str string) bool func IsUpperCase(str 
string) bool func IsVariableWidth(str string) bool func IsWhole(value float64) bool func LeftTrim(str, chars string) string func Map(array []interface{}, iterator ResultIterator) []interface{} func Matches(str, pattern string) bool +func MaxStringLength(str string, params ...string) bool +func MinStringLength(str string, params ...string) bool func NormalizeEmail(str string) (string, error) func PadBoth(str string, padStr string, padLen int) string func PadLeft(str string, padStr string, padLen int) string func PadRight(str string, padStr string, padLen int) string +func PrependPathToErrors(err error, path string) error func Range(str string, params ...string) bool func RemoveTags(s string) string func ReplacePattern(str, pattern, replace string) string @@ -192,18 +224,21 @@ func RightTrim(str, chars string) string func RuneLength(str string, params ...string) bool func SafeFileName(str string) string func SetFieldsRequiredByDefault(value bool) +func SetNilPtrAllowedByRequired(value bool) func Sign(value float64) float64 func StringLength(str string, params ...string) bool func StringMatches(s string, params ...string) bool func StripLow(str string, keepNewLines bool) string func ToBoolean(str string) (bool, error) func ToFloat(str string) (float64, error) -func ToInt(str string) (int64, error) +func ToInt(value interface{}) (res int64, err error) func ToJSON(obj interface{}) (string, error) func ToString(obj interface{}) string func Trim(str, chars string) string func Truncate(str string, length int, ending string) string +func TruncatingErrorf(str string, args ...interface{}) error func UnderscoreToCamelCase(s string) string +func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) func ValidateStruct(s interface{}) (bool, error) func WhiteList(str, chars string) string type ConditionIterator @@ -214,6 +249,8 @@ type Errors func (es Errors) Error() string func (es Errors) Errors() []error type ISO3166Entry +type ISO693Entry +type InterfaceParamValidator type Iterator type ParamValidator type ResultIterator @@ -227,6 +264,27 @@ type Validator ```go println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) ``` +###### IsType +```go +println(govalidator.IsType("Bob", "string")) +println(govalidator.IsType(1, "int")) +i := 1 +println(govalidator.IsType(&i, "*int")) +``` + +IsType can be used through the tag `type` which is essential for map validation: +```go +type User struct { + Name string `valid:"type(string)"` + Age int `valid:"type(int)"` + Meta interface{} `valid:"type(string)"` +} +result, err := govalidator.ValidateStruct(user{"Bob", 20, "meta"}) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` ###### ToString ```go type User struct { @@ -335,6 +393,11 @@ Validators with parameters "in(string1|string2|...|stringN)": IsIn, "rsapub(keylength)" : IsRsaPub, ``` +Validators with parameters for any type + +```go +"type(type)": IsType, +``` And here is small example of usage: ```go @@ -370,6 +433,41 @@ if err != nil { } println(result) ``` +###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) +If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` + +So here is small example of usage: +```go +var mapTemplate = map[string]interface{}{ + "name":"required,alpha", + "family":"required,alpha", + "email":"required,email", + "cell-phone":"numeric", + 
"address":map[string]interface{}{ + "line1":"required,alphanum", + "line2":"alphanum", + "postal-code":"numeric", + }, +} + +var inputMap = map[string]interface{}{ + "name":"Bob", + "family":"Smith", + "email":"foo@bar.baz", + "address":map[string]interface{}{ + "line1":"", + "line2":"", + "postal-code":"", + }, +} + +result, err := govalidator.ValidateMap(inputMap, mapTemplate) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` + ###### WhiteList ```go // Remove all characters from string ignoring characters between "a" and "z" @@ -389,7 +487,7 @@ type StructWithCustomByteArray struct { CustomMinLength int `valid:"-"` } -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { switch v := context.(type) { // you can type switch on the context interface being validated case StructWithCustomByteArray: // you can check and validate against some other field in the context, @@ -409,14 +507,25 @@ govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator } } return false -})) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { +}) +govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation case StructWithCustomByteArray: return len(v.ID) >= v.CustomMinLength } return false -})) +}) +``` + +###### Loop over Error() +By default .Error() returns all errors in a single String. To access each error you can do this: +```go + if err != nil { + errs := err.(govalidator.Errors).Errors() + for _, e := range errs { + fmt.Println(e.Error()) + } + } ``` ###### Custom error messages @@ -445,7 +554,7 @@ If you don't know what to do, there are some features and functions that need to - [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) - [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new - [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) +- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) - [ ] Implement fuzzing testing - [ ] Implement some struct/map/array utilities - [ ] Implement map/array validation @@ -475,7 +584,7 @@ This project exists thanks to all the people who contribute. [[Contribute](CONTR * [Matt Sanford](https://github.com/mzsanford) * [Simon ccl1115](https://github.com/ccl1115) - + ### Backers @@ -504,4 +613,4 @@ Support this project by becoming a sponsor. 
Your logo will show up here with a l ## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) \ No newline at end of file +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go new file mode 100644 index 000000000..55dce62dc --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/doc.go @@ -0,0 +1,3 @@ +package govalidator + +// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go index 655b750cb..1da2336f4 100644 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ b/vendor/github.com/asaskevich/govalidator/error.go @@ -1,6 +1,9 @@ package govalidator -import "strings" +import ( + "sort" + "strings" +) // Errors is an array of multiple errors and conforms to the error interface. type Errors []error @@ -15,6 +18,7 @@ func (es Errors) Error() string { for _, e := range es { errs = append(errs, e.Error()) } + sort.Strings(errs) return strings.Join(errs, ";") } diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go index 61a05d438..e55451cff 100644 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -4,49 +4,51 @@ import "regexp" // Basic regular expressions for validating strings const ( - Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = 
"^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" + Email string = 
"^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = 
`(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" ) // Used by IsFilePath func @@ -60,42 +62,44 @@ const ( ) var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = regexp.MustCompile(ISBN13) - rxUUID3 = regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = regexp.MustCompile(UnixPath) - rxSemver = 
regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxMagnetURI = regexp.MustCompile(MagnetURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = regexp.MustCompile(UnixPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + rxIMEI = regexp.MustCompile(IMEI) ) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go index 4f7e9274a..b57b666f5 100644 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -16,6 +16,7 @@ type CustomTypeValidator func(i interface{}, o interface{}) bool // ParamValidator is a wrapper for validator functions that accepts additional parameters. type ParamValidator func(str string, params ...string) bool +type InterfaceParamValidator func(in interface{}, params ...string) bool type tagOptionsMap map[string]tagOption func (t tagOptionsMap) orderedKeys() []string { @@ -46,15 +47,27 @@ type UnsupportedTypeError struct { // It implements the methods to sort by string. type stringValues []reflect.Value +// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value +var InterfaceParamTagMap = map[string]InterfaceParamValidator{ + "type": IsType, +} + +// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. 
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ + "type": regexp.MustCompile(`^type\((.*)\)$`), +} + // ParamTagMap is a map of functions accept variants parameters var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": isInRaw, - "rsapub": IsRsaPub, + "length": ByteLength, + "range": Range, + "runelength": RuneLength, + "stringlength": StringLength, + "matches": StringMatches, + "in": IsInRaw, + "rsapub": IsRsaPub, + "minstringlength": MinStringLength, + "maxstringlength": MaxStringLength, } // ParamTagRegexMap maps param tags to their respective regexes. @@ -66,6 +79,8 @@ var ParamTagRegexMap = map[string]*regexp.Regexp{ "in": regexp.MustCompile(`^in\((.*)\)`), "matches": regexp.MustCompile(`^matches\((.+)\)$`), "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), + "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), + "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), } type customTypeTagMap struct { @@ -114,6 +129,7 @@ var TagMap = map[string]Validator{ "int": IsInt, "float": IsFloat, "null": IsNull, + "notnull": IsNotNull, "uuid": IsUUID, "uuidv3": IsUUIDv3, "uuidv4": IsUUIDv4, @@ -146,6 +162,7 @@ var TagMap = map[string]Validator{ "ISO3166Alpha2": IsISO3166Alpha2, "ISO3166Alpha3": IsISO3166Alpha3, "ISO4217": IsISO4217, + "IMEI": IsIMEI, } // ISO3166Entry stores country codes diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go index a0b706a74..f4c30f824 100644 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ b/vendor/github.com/asaskevich/govalidator/utils.go @@ -12,20 +12,20 @@ import ( "unicode/utf8" ) -// Contains check if the string contains the substring. +// Contains checks if the string contains the substring. func Contains(str, substring string) bool { return strings.Contains(str, substring) } -// Matches check if string matches the pattern (pattern is regular expression) +// Matches checks if string matches the pattern (pattern is regular expression) // In case of error return false func Matches(str, pattern string) bool { match, _ := regexp.MatchString(pattern, str) return match } -// LeftTrim trim characters from the left-side of the input. -// If second argument is empty, it's will be remove leading spaces. +// LeftTrim trims characters from the left side of the input. +// If second argument is empty, it will remove leading spaces. func LeftTrim(str, chars string) string { if chars == "" { return strings.TrimLeftFunc(str, unicode.IsSpace) @@ -34,8 +34,8 @@ func LeftTrim(str, chars string) string { return r.ReplaceAllString(str, "") } -// RightTrim trim characters from the right-side of the input. -// If second argument is empty, it's will be remove spaces. +// RightTrim trims characters from the right side of the input. +// If second argument is empty, it will remove trailing spaces. func RightTrim(str, chars string) string { if chars == "" { return strings.TrimRightFunc(str, unicode.IsSpace) @@ -44,27 +44,27 @@ func RightTrim(str, chars string) string { return r.ReplaceAllString(str, "") } -// Trim trim characters from both sides of the input. -// If second argument is empty, it's will be remove spaces. +// Trim trims characters from both sides of the input. +// If second argument is empty, it will remove spaces. 
func Trim(str, chars string) string { return LeftTrim(RightTrim(str, chars), chars) } -// WhiteList remove characters that do not appear in the whitelist. +// WhiteList removes characters that do not appear in the whitelist. func WhiteList(str, chars string) string { pattern := "[^" + chars + "]+" r, _ := regexp.Compile(pattern) return r.ReplaceAllString(str, "") } -// BlackList remove characters that appear in the blacklist. +// BlackList removes characters that appear in the blacklist. func BlackList(str, chars string) string { pattern := "[" + chars + "]+" r, _ := regexp.Compile(pattern) return r.ReplaceAllString(str, "") } -// StripLow remove characters with a numerical value < 32 and 127, mostly control characters. +// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. // If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). func StripLow(str string, keepNewLines bool) string { chars := "" @@ -76,13 +76,13 @@ func StripLow(str string, keepNewLines bool) string { return BlackList(str, chars) } -// ReplacePattern replace regular expression pattern in string +// ReplacePattern replaces regular expression pattern in string func ReplacePattern(str, pattern, replace string) string { r, _ := regexp.Compile(pattern) return r.ReplaceAllString(str, replace) } -// Escape replace <, >, & and " with HTML entities. +// Escape replaces <, >, & and " with HTML entities. var Escape = html.EscapeString func addSegment(inrune, segment []rune) []rune { @@ -120,7 +120,7 @@ func CamelCaseToUnderscore(str string) string { return string(output) } -// Reverse return reversed string +// Reverse returns reversed string func Reverse(s string) string { r := []rune(s) for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { @@ -129,12 +129,12 @@ func Reverse(s string) string { return string(r) } -// GetLines split string by "\n" and return array of lines +// GetLines splits string by "\n" and return array of lines func GetLines(s string) []string { return strings.Split(s, "\n") } -// GetLine return specified line of multiline string +// GetLine returns specified line of multiline string func GetLine(s string, index int) (string, error) { lines := GetLines(s) if index < 0 || index >= len(lines) { @@ -143,12 +143,12 @@ func GetLine(s string, index int) (string, error) { return lines[index], nil } -// RemoveTags remove all tags from HTML string +// RemoveTags removes all tags from HTML string func RemoveTags(s string) string { return ReplacePattern(s, "<[^>]*>", "") } -// SafeFileName return safe string that can be used in file names +// SafeFileName returns safe string that can be used in file names func SafeFileName(str string) string { name := strings.ToLower(str) name = path.Clean(path.Base(name)) @@ -210,23 +210,23 @@ func Truncate(str string, length int, ending string) string { return str } -// PadLeft pad left side of string if size of string is less then indicated pad length +// PadLeft pads left side of a string if size of string is less then indicated pad length func PadLeft(str string, padStr string, padLen int) string { return buildPadStr(str, padStr, padLen, true, false) } -// PadRight pad right side of string if size of string is less then indicated pad length +// PadRight pads right side of a string if size of string is less then indicated pad length func PadRight(str string, padStr string, padLen int) string { return buildPadStr(str, padStr, padLen, false, true) } -// PadBoth pad sides of string if size of string is less then 
indicated pad length +// PadBoth pads both sides of a string if size of string is less then indicated pad length func PadBoth(str string, padStr string, padLen int) string { return buildPadStr(str, padStr, padLen, true, true) }
-// PadString either left, right or both sides, not the padding string can be unicode and more then one -character +// PadString either left, right or both sides. +// Note that padding string can be unicode and more than one character func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { // When padded length is less then the current string size
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index b18bbcb4c..298f9920d 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go
@@ -282,7 +282,7 @@ func HasLowerCase(str string) bool { return rxHasLowerCase.MatchString(str) }
-// HasUpperCase check if the string contians as least 1 uppercase. Empty string is valid. +// HasUpperCase check if the string contains at least 1 uppercase. Empty string is valid. func HasUpperCase(str string) bool { if IsNull(str) { return true
@@ -321,14 +321,19 @@ func IsNull(str string) bool { return len(str) == 0 }
+// IsNotNull check if the string is not null. +func IsNotNull(str string) bool { + return !IsNull(str) +} +
// HasWhitespaceOnly checks the string only contains whitespace func HasWhitespaceOnly(str string) bool { - return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) + return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) }
// HasWhitespace checks if the string contains any whitespace func HasWhitespace(str string) bool { - return len(str) > 0 && rxHasWhitespace.MatchString(str) + return len(str) > 0 && rxHasWhitespace.MatchString(str) }
// IsByteLength check if the string's length (in bytes) falls in a range.
@@ -513,6 +518,11 @@ func IsDataURI(str string) bool { return IsBase64(dataURI[1]) }
+// IsMagnetURI checks if a string is a valid magnet URI +func IsMagnetURI(str string) bool { + return rxMagnetURI.MatchString(str) +} +
// IsISO3166Alpha2 checks if a string is valid two-letter country code func IsISO3166Alpha2(str string) bool { for _, entry := range ISO3166List {
@@ -565,7 +575,7 @@ func IsDNSName(str string) bool {
// IsHash checks if a string is a hash of type algorithm. // Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] func IsHash(str string, algorithm string) bool { - len := "0" + var len string
algo := strings.ToLower(algorithm)
if algo == "crc32" || algo == "crc32b" {
@@ -589,9 +599,73 @@ func IsHash(str string, algorithm string) bool { return Matches(str, "^[a-f0-9]{"+len+"}$") }
+// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` +func IsSHA512(str string) bool { + return IsHash(str, "sha512") +} +
+// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` +func IsSHA384(str string) bool { + return IsHash(str, "sha384") +} +
+// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` +func IsSHA256(str string) bool { + return IsHash(str, "sha256") +} +
+// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` +func IsTiger192(str string) bool { + return IsHash(str, "tiger192") +} +
+// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")` +func IsTiger160(str string) bool { + return IsHash(str, "tiger160") +} +
+// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` +func IsRipeMD160(str string) bool { + return IsHash(str, "ripemd160") +} +
+// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` +func IsSHA1(str string) bool { + return IsHash(str, "sha1") +} +
+// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` +func IsTiger128(str string) bool { + return IsHash(str, "tiger128") +} +
+// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` +func IsRipeMD128(str string) bool { + return IsHash(str, "ripemd128") +} +
+// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` +func IsCRC32(str string) bool { + return IsHash(str, "crc32") +} +
+// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` +func IsCRC32b(str string) bool { + return IsHash(str, "crc32b") +} +
+// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")` +func IsMD5(str string) bool { + return IsHash(str, "md5") +} +
+// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")` +func IsMD4(str string) bool { + return IsHash(str, "md4") +} +
// IsDialString validates the given string for usage with the various Dial() functions func IsDialString(str string) bool { - if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { return true }
@@ -599,7 +673,7 @@ return false }
-// IsIP checks if a string is either IP version 4 or 6. +// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` func IsIP(str string) bool { return net.ParseIP(str) != nil }
@@ -663,6 +737,11 @@ func IsLongitude(str string) bool { return rxLongitude.MatchString(str) }
+// IsIMEI check if a string is a valid IMEI +func IsIMEI(str string) bool { + return rxIMEI.MatchString(str) +} +
// IsRsaPublicKey check if a string is valid public key with provided length func IsRsaPublicKey(str string, keylen int) bool { bb := bytes.NewBufferString(str)
@@ -729,12 +808,116 @@ func PrependPathToErrors(err error, path string) error { } return err2 } - fmt.Println(err) return err }
+// ValidateMap uses a validation map for fields. +// result will be equal to `false` if there are any errors. +// s is the map containing the data to be validated.
+// m is the validation map in the form: +// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} +func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + var errs Errors + var index int + val := reflect.ValueOf(s) + for key, value := range s { + presentResult := true + validator, ok := m[key] + if !ok { + presentResult = false + var err error + err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + valueField := reflect.ValueOf(value) + mapResult := true + typeResult := true + structResult := true + resultField := true + switch subValidator := validator.(type) { + case map[string]interface{}: + var err error + if v, ok := value.(map[string]interface{}); !ok { + mapResult = false + err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } else { + mapResult, err = ValidateMap(v, subValidator) + if err != nil { + mapResult = false + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + } + case string: + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + subValidator != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + } + resultField, err = typeCheck(valueField, reflect.StructField{ + Name: key, + PkgPath: "", + Type: val.Type(), + Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), + Offset: 0, + Index: []int{index}, + Anonymous: false, + }, val, nil) + if err != nil { + errs = append(errs, err) + } + case nil: + // already handlerd when checked before + default: + typeResult = false + err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) + err = PrependPathToErrors(err, key) + errs = append(errs, err) + } + result = result && presentResult && typeResult && resultField && structResult && mapResult + index++ + } + // check required keys + requiredResult := true + for key, value := range m { + if schema, ok := value.(string); ok { + tags := parseTagIntoMap(schema) + if required, ok := tags["required"]; ok { + if _, ok := s[key]; !ok { + requiredResult = false + if required.customErrorMessage != "" { + err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} + } else { + err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} + } + errs = append(errs, err) + } + } + } + } + + if len(errs) > 0 { + err = errs + } + return result && requiredResult, err +} + // ValidateStruct use tags for fields. // result will be equal to `false` if there are any errors. 
+// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) func ValidateStruct(s interface{}) (bool, error) { if s == nil { return true, nil @@ -856,12 +1039,29 @@ func IsSemver(str string) bool { return rxSemver.MatchString(str) } +// IsType check if interface is of some type +func IsType(v interface{}, params ...string) bool { + if len(params) == 1 { + typ := params[0] + return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) + } + return false +} + // IsTime check if string is valid according to given format func IsTime(str string, format string) bool { _, err := time.Parse(format, str) return err == nil } +// IsUnixTime check if string is valid unix timestamp value +func IsUnixTime(str string) bool { + if _, err := strconv.Atoi(str); err == nil { + return true + } + return false +} + // IsRFC3339 check if string is valid timestamp value according to RFC3339 func IsRFC3339(str string) bool { return IsTime(str, time.RFC3339) @@ -933,6 +1133,30 @@ func StringLength(str string, params ...string) bool { return false } +// MinStringLength check string's minimum length (including multi byte strings) +func MinStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + return strLength >= int(min) + } + + return false +} + +// MaxStringLength check string's maximum length (including multi byte strings) +func MaxStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + max, _ := ToInt(params[0]) + return strLength <= int(max) + } + + return false +} + // Range check string's length func Range(str string, params ...string) bool { if len(params) == 2 { @@ -945,7 +1169,7 @@ func Range(str string, params ...string) bool { return false } -func isInRaw(str string, params ...string) bool { +func IsInRaw(str string, params ...string) bool { if len(params) == 1 { rawParams := params[0] @@ -1014,7 +1238,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options options = parseTagIntoMap(tag) } - if isEmptyValue(v) { + if !isFieldSet(v) { // an empty value is not validated, check only required isValid, resultErr = checkRequired(v, t, options) for key := range options { @@ -1062,6 +1286,45 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options }() } + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' 
{ + validator = validator[1:] + negate = true + } + + // Check for interface param validators + for key, value := range InterfaceParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := InterfaceParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + field := fmt.Sprint(v) + if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + } + } + switch v.Kind() { case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, @@ -1121,10 +1384,10 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options delete(options, validatorSpec) switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: field := fmt.Sprint(v) // make value into string, then validate with regex if result := validatefunc(field); !result && !negate || result && negate { if customMsgExists { @@ -1202,7 +1465,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options } return typeCheck(v.Elem(), t, o, options) case reflect.Struct: - return ValidateStruct(v.Interface()) + return true, nil default: return false, &UnsupportedTypeError{v.Type()} } @@ -1212,25 +1475,14 @@ func stripParams(validatorString string) string { return paramsRegexp.ReplaceAllString(validatorString, "") } -func isEmptyValue(v reflect.Value) bool { +// isFieldSet returns false for nil pointers, interfaces, maps, and slices. For all other values, it returns true. 
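+//
+// Illustrative sketch of the new semantics (editorial example, not part of
+// the upstream patch): only nil-able kinds count as "unset", so zero-valued
+// scalars are still validated:
+//
+//	var p *int
+//	isFieldSet(reflect.ValueOf(p)) // false: nil pointer, only the `required` check applies
+//	isFieldSet(reflect.ValueOf(0)) // true: a zero int counts as set and is fully validated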
+func isFieldSet(v reflect.Value) bool { switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() + case reflect.Map, reflect.Slice, reflect.Interface, reflect.Ptr: + return !v.IsNil() } - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) + return true } // ErrorByField returns error for specified field of the struct @@ -1252,11 +1504,11 @@ func ErrorsByField(e error) map[string]string { } // prototype for ValidateStruct - switch e.(type) { + switch e := e.(type) { case Error: - m[e.(Error).Name] = e.(Error).Err.Error() + m[e.Name] = e.Err.Error() case Errors: - for _, item := range e.(Errors).Errors() { + for _, item := range e.Errors() { n := ErrorsByField(item) for k, v := range n { m[k] = v diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml index cac7a5fcf..bc5f7b086 100644 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -12,4 +12,4 @@ build: - script: name: go test code: | - go test -race ./... + go test -race -v ./... diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml new file mode 100644 index 000000000..b94ff8cf9 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -0,0 +1,21 @@ +# Copyright (C) 2017 SUSE LLC. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +language: go +go: + - 1.13.x + - 1.16.x + - tip +arch: + - AMD64 + - ppc64le +os: + - linux + - osx + +script: + - go test -cover -v ./... + +notifications: + email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE new file mode 100644 index 000000000..bec842f29 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 000000000..3624617c8 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,79 @@ +## `filepath-securejoin` ## + +[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) + +An implementation of `SecureJoin`, a [candidate for inclusion in the Go +standard library][go#20126]. The purpose of this function is to be a "secure" +alternative to `filepath.Join`, and in particular it provides certain +guarantees that are not provided by `filepath.Join`. + +> **NOTE**: This code is *only* safe if you are not at risk of other processes +> modifying path components after you've used `SecureJoin`. If it is possible +> for a malicious process to modify path components of the resolved path, then +> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There +> are some Linux kernel patches I'm working on which might allow for a better +> solution.][lwn-obeneath] +> +> In addition, with a slightly modified API it might be possible to use +> `O_PATH` and verify that the opened path is actually the resolved one -- but +> I have not done that yet. I might add it in the future as a helper function +> to help users verify the path (we can't just return `/proc/self/fd/` +> because that doesn't always work transparently for all users). + +This is the function prototype: + +```go +func SecureJoin(root, unsafePath string) (string, error) +``` + +This library **guarantees** the following: + +* If no error is set, the resulting string **must** be a child path of + `root` and will not contain any symlink path components (they will all be + expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existent path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. 
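+
+A minimal usage sketch (editorial illustration, not from the upstream README;
+the root and path below are hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	securejoin "github.com/cyphar/filepath-securejoin"
+)
+
+func main() {
+	// A hostile "../../" traversal is resolved inside the root rather than
+	// escaping it.
+	path, err := securejoin.SecureJoin("/var/lib/app", "../../etc/passwd")
+	if err != nil {
+		fmt.Println("error:", err)
+		return
+	}
+	fmt.Println(path) // /var/lib/app/etc/passwd (assuming no symlinks are involved)
+}
+```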
+ +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[lwn-obeneath]: https://lwn.net/Articles/767547/ +[go#20126]: https://github.com/golang/go/issues/20126 + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 000000000..717903969 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.3 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 000000000..7dd08dbbd --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,115 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. +// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" +) + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. 
+ if vfs == nil { + vfs = osVFS{} + } + + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} + } + + // Next path component, p. + i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. + fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 000000000..a82a5eae1 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. 
These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index dc2b7e51e..4bce5936d 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -568,29 +568,6 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } - if path == "" { - val := op.value() - - if val.which == eRaw { - if !val.tryDoc() { - if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") - } - } - } - - switch val.which { - case eAry: - *doc = &val.ary - case eDoc: - *doc = &val.doc - case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") - } - - return nil - } - con, key := findObject(doc, path) if con == nil { @@ -657,25 +634,6 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } - if path == "" { - var self lazyNode - - switch sv := (*doc).(type) { - case *partialDoc: - self.doc = *sv - self.which = eDoc - case *partialArray: - self.ary = *sv - self.which = eAry - } - - if self.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - con, key := findObject(doc, path) if con == nil { diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/fatih/structs/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml new file mode 100644 index 000000000..a08df7981 --- /dev/null +++ b/vendor/github.com/fatih/structs/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - tip +sudo: false +before_install: +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- if ! 
go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +script: +- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE new file mode 100644 index 000000000..34504e4b3 --- /dev/null +++ b/vendor/github.com/fatih/structs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md new file mode 100644 index 000000000..a75eabf37 --- /dev/null +++ b/vendor/github.com/fatih/structs/README.md @@ -0,0 +1,163 @@ +# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) + +Structs contains various utilities to work with Go (Golang) structs. It was +initially used by me to convert a struct into a `map[string]interface{}`. With +time I've added other utilities for structs. It's basically a high level +package based on primitives from the reflect package. Feel free to add new +functions or improve the existing code. + +## Install + +```bash +go get github.com/fatih/structs +``` + +## Usage and Examples + +Just like the standard lib `strings`, `bytes` and co packages, `structs` has +many global functions to manipulate or organize your struct data. 
Let's define
+and declare a struct:
+
+```go
+type Server struct {
+	Name        string `json:"name,omitempty"`
+	ID          int
+	Enabled     bool
+	users       []string // not exported
+	http.Server          // embedded
+}
+
+server := &Server{
+	Name:    "gopher",
+	ID:      123456,
+	Enabled: true,
+}
+```
+
+```go
+// Convert a struct to a map[string]interface{}
+// => {"Name":"gopher", "ID":123456, "Enabled":true}
+m := structs.Map(server)
+
+// Convert the values of a struct to a []interface{}
+// => ["gopher", 123456, true]
+v := structs.Values(server)
+
+// Convert the names of a struct to a []string
+// (see "Names methods" for more info about fields)
+n := structs.Names(server)
+
+// Convert the values of a struct to a []*Field
+// (see "Field methods" for more info about fields)
+f := structs.Fields(server)
+
+// Return the struct name => "Server"
+n := structs.Name(server)
+
+// Check if any field of a struct is initialized or not.
+h := structs.HasZero(server)
+
+// Check if all fields of a struct are initialized or not.
+z := structs.IsZero(server)
+
+// Check if server is a struct or a pointer to struct
+i := structs.IsStruct(server)
+```
+
+### Struct methods
+
+The structs functions can also be used as independent methods by creating a new
+`*structs.Struct`. This is handy if you want to have more control over the
+structs (such as retrieving a single Field).
+
+```go
+// Create a new struct type:
+s := structs.New(server)
+
+m := s.Map()              // Get a map[string]interface{}
+v := s.Values()           // Get a []interface{}
+f := s.Fields()           // Get a []*Field
+n := s.Names()            // Get a []string
+f := s.Field(name)        // Get a *Field based on the given field name
+f, ok := s.FieldOk(name)  // Get a *Field based on the given field name
+n := s.Name()             // Get the struct name
+h := s.HasZero()          // Check if any field is uninitialized
+z := s.IsZero()           // Check if all fields are uninitialized
+```
+
+### Field methods
+
+We can easily examine a single Field for more detail. Below you can see how we
+get and interact with various field methods:
+
+```go
+s := structs.New(server)
+
+// Get the Field struct for the "Name" field
+name := s.Field("Name")
+
+// Get the underlying value, value => "gopher"
+value := name.Value().(string)
+
+// Set the field's value
+name.Set("another gopher")
+
+// Get the field's kind, kind => "string"
+name.Kind()
+
+// Check if the field is exported or not
+if name.IsExported() {
+	fmt.Println("Name field is exported")
+}
+
+// Check if the value is a zero value, such as "" for string, 0 for int
+if !name.IsZero() {
+	fmt.Println("Name is initialized")
+}
+
+// Check if the field is an anonymous (embedded) field
+if !name.IsEmbedded() {
+	fmt.Println("Name is not an embedded field")
+}
+
+// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
+tagValue := name.Tag("json")
+```
+
+Nested structs are supported too:
+
+```go
+addrField := s.Field("Server").Field("Addr")
+
+// Get the value for addr
+a := addrField.Value().(string)
+
+// Or get all fields
+httpServer := s.Field("Server").Fields()
+```
+
+We can also get a slice of Fields from the Struct type to iterate over all
+fields. 
This is handy if you wish to examine all fields: + +```go +s := structs.New(server) + +for _, f := range s.Fields() { + fmt.Printf("field name: %+v\n", f.Name()) + + if f.IsExported() { + fmt.Printf("value : %+v\n", f.Value()) + fmt.Printf("is zero : %+v\n", f.IsZero()) + } +} +``` + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * [Cihangir Savas](https://github.com/cihangir) + +## License + +The MIT License (MIT) - see LICENSE.md for more details diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go new file mode 100644 index 000000000..e69783230 --- /dev/null +++ b/vendor/github.com/fatih/structs/field.go @@ -0,0 +1,141 @@ +package structs + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + errNotExported = errors.New("field is not exported") + errNotSettable = errors.New("field is not settable") +) + +// Field represents a single struct field that encapsulates high level +// functions around the field. +type Field struct { + value reflect.Value + field reflect.StructField + defaultTag string +} + +// Tag returns the value associated with key in the tag string. If there is no +// such key in the tag, Tag returns the empty string. +func (f *Field) Tag(key string) string { + return f.field.Tag.Get(key) +} + +// Value returns the underlying value of the field. It panics if the field +// is not exported. +func (f *Field) Value() interface{} { + return f.value.Interface() +} + +// IsEmbedded returns true if the given field is an anonymous field (embedded) +func (f *Field) IsEmbedded() bool { + return f.field.Anonymous +} + +// IsExported returns true if the given field is exported. +func (f *Field) IsExported() bool { + return f.field.PkgPath == "" +} + +// IsZero returns true if the given field is not initialized (has a zero value). +// It panics if the field is not exported. +func (f *Field) IsZero() bool { + zero := reflect.Zero(f.value.Type()).Interface() + current := f.Value() + + return reflect.DeepEqual(current, zero) +} + +// Name returns the name of the given field +func (f *Field) Name() string { + return f.field.Name +} + +// Kind returns the fields kind, such as "string", "map", "bool", etc .. +func (f *Field) Kind() reflect.Kind { + return f.value.Kind() +} + +// Set sets the field to given value v. It returns an error if the field is not +// settable (not addressable or not exported) or if the given value's type +// doesn't match the fields type. +func (f *Field) Set(val interface{}) error { + // we can't set unexported fields, so be sure this field is exported + if !f.IsExported() { + return errNotExported + } + + // do we get here? not sure... + if !f.value.CanSet() { + return errNotSettable + } + + given := reflect.ValueOf(val) + + if f.value.Kind() != given.Kind() { + return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind()) + } + + f.value.Set(given) + return nil +} + +// Zero sets the field to its zero value. It returns an error if the field is not +// settable (not addressable or not exported). +func (f *Field) Zero() error { + zero := reflect.Zero(f.value.Type()).Interface() + return f.Set(zero) +} + +// Fields returns a slice of Fields. This is particular handy to get the fields +// of a nested struct . A struct tag with the content of "-" ignores the +// checking of that particular field. Example: +// +// // Field is ignored by this package. 
+// Field *http.Request `structs:"-"` +// +// It panics if field is not exported or if field's kind is not struct +func (f *Field) Fields() []*Field { + return getFields(f.value, f.defaultTag) +} + +// Field returns the field from a nested struct. It panics if the nested struct +// is not exported or if the field was not found. +func (f *Field) Field(name string) *Field { + field, ok := f.FieldOk(name) + if !ok { + panic("field not found") + } + + return field +} + +// FieldOk returns the field from a nested struct. The boolean returns whether +// the field was found (true) or not (false). +func (f *Field) FieldOk(name string) (*Field, bool) { + value := &f.value + // value must be settable so we need to make sure it holds the address of the + // variable and not a copy, so we can pass the pointer to strctVal instead of a + // copy (which is not assigned to any variable, hence not settable). + // see "https://blog.golang.org/laws-of-reflection#TOC_8." + if f.value.Kind() != reflect.Ptr { + a := f.value.Addr() + value = &a + } + v := strctVal(value.Interface()) + t := v.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: v.FieldByName(name), + }, true +} diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go new file mode 100644 index 000000000..3a8770652 --- /dev/null +++ b/vendor/github.com/fatih/structs/structs.go @@ -0,0 +1,584 @@ +// Package structs contains various utilities functions to work with structs. +package structs + +import ( + "fmt" + + "reflect" +) + +var ( + // DefaultTagName is the default tag name for struct fields which provides + // a more granular to tweak certain structs. Lookup the necessary functions + // for more info. + DefaultTagName = "structs" // struct's field default tag name +) + +// Struct encapsulates a struct type to provide several high level functions +// around the struct. +type Struct struct { + raw interface{} + value reflect.Value + TagName string +} + +// New returns a new *Struct with the struct s. It panics if the s's kind is +// not struct. +func New(s interface{}) *Struct { + return &Struct{ + raw: s, + value: strctVal(s), + TagName: DefaultTagName, + } +} + +// Map converts the given struct to a map[string]interface{}, where the keys +// of the map are the field names and the values of the map the associated +// values of the fields. The default key string is the struct field name but +// can be changed in the struct field's tag value. The "structs" key in the +// struct's field tag value is the key name. Example: +// +// // Field appears in map as key "myName". +// Name string `structs:"myName"` +// +// A tag value with the content of "-" ignores that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A tag value with the content of "string" uses the stringer to get the value. Example: +// +// // The value will be output of Animal's String() func. +// // Map will panic if Animal does not implement String(). +// Field *Animal `structs:"field,string"` +// +// A tag value with the option of "flatten" used in a struct field is to flatten its fields +// in the output map. Example: +// +// // The FieldStruct's fields will be flattened into the output map. +// FieldStruct time.Time `structs:",flatten"` +// +// A tag value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. 
+// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field if +// the field value is empty. Example: +// +// // Field appears in map as key "myName", but the field is +// // skipped if empty. +// Field string `structs:"myName,omitempty"` +// +// // Field appears in map as key "Field" (the default), but +// // the field is skipped if empty. +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. +func (s *Struct) Map() map[string]interface{} { + out := make(map[string]interface{}) + s.FillMap(out) + return out +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func (s *Struct) FillMap(out map[string]interface{}) { + if out == nil { + return + } + + fields := s.structFields() + + for _, field := range fields { + name := field.Name + val := s.value.FieldByName(name) + isSubStruct := false + var finalVal interface{} + + tagName, tagOpts := parseTag(field.Tag.Get(s.TagName)) + if tagName != "" { + name = tagName + } + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if !tagOpts.Has("omitnested") { + finalVal = s.nested(val) + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Map, reflect.Struct: + isSubStruct = true + } + } else { + finalVal = val.Interface() + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + out[name] = s.String() + } + continue + } + + if isSubStruct && (tagOpts.Has("flatten")) { + for k := range finalVal.(map[string]interface{}) { + out[k] = finalVal.(map[string]interface{})[k] + } + } else { + out[name] = finalVal + } + } +} + +// Values converts the given s struct's field values to a []interface{}. A +// struct tag with the content of "-" ignores the that particular field. +// Example: +// +// // Field is ignored by this package. +// Field int `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Fields is not processed further by this package. +// Field time.Time `structs:",omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field and +// is not added to the values if the field value is empty. Example: +// +// // Field is skipped if empty +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. 
+func (s *Struct) Values() []interface{} { + fields := s.structFields() + + var t []interface{} + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + t = append(t, s.String()) + } + continue + } + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + // look out for embedded structs, and convert them to a + // []interface{} to be added to the final values slice + t = append(t, Values(val.Interface())...) + } else { + t = append(t, val.Interface()) + } + } + + return t +} + +// Fields returns a slice of Fields. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Fields() []*Field { + return getFields(s.value, s.TagName) +} + +// Names returns a slice of field names. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Names() []string { + fields := getFields(s.value, s.TagName) + + names := make([]string, len(fields)) + + for i, field := range fields { + names[i] = field.Name() + } + + return names +} + +func getFields(v reflect.Value, tagName string) []*Field { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + t := v.Type() + + var fields []*Field + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if tag := field.Tag.Get(tagName); tag == "-" { + continue + } + + f := &Field{ + field: field, + value: v.FieldByName(field.Name), + } + + fields = append(fields, f) + + } + + return fields +} + +// Field returns a new Field struct that provides several high level functions +// around a single struct field entity. It panics if the field is not found. +func (s *Struct) Field(name string) *Field { + f, ok := s.FieldOk(name) + if !ok { + panic("field not found") + } + + return f +} + +// FieldOk returns a new Field struct that provides several high level functions +// around a single struct field entity. The boolean returns true if the field +// was found. +func (s *Struct) FieldOk(name string) (*Field, bool) { + t := s.value.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: s.value.FieldByName(name), + defaultTag: s.TagName, + }, true +} + +// IsZero returns true if all fields in a struct is a zero value (not +// initialized) A struct tag with the content of "-" ignores the checking of +// that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. 
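+//
+// A short illustrative example:
+//
+//	New(struct{ A int }{}).IsZero()     // true
+//	New(struct{ A int }{A: 1}).IsZero() // false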
+func (s *Struct) IsZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := IsZero(val.Interface()) + if !ok { + return false + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if !reflect.DeepEqual(current, zero) { + return false + } + } + + return true +} + +// HasZero returns true if a field in a struct is not initialized (zero value). +// A struct tag with the content of "-" ignores the checking of that particular +// field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. +func (s *Struct) HasZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := HasZero(val.Interface()) + if ok { + return true + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + return true + } + } + + return false +} + +// Name returns the structs's type name within its package. For more info refer +// to Name() function. +func (s *Struct) Name() string { + return s.value.Type().Name() +} + +// structFields returns the exported struct fields for a given s struct. This +// is a convenient helper method to avoid duplicate code in some of the +// functions. +func (s *Struct) structFields() []reflect.StructField { + t := s.value.Type() + + var f []reflect.StructField + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // we can't access the value of unexported fields + if field.PkgPath != "" { + continue + } + + // don't check if it's omitted + if tag := field.Tag.Get(s.TagName); tag == "-" { + continue + } + + f = append(f, field) + } + + return f +} + +func strctVal(s interface{}) reflect.Value { + v := reflect.ValueOf(s) + + // if pointer get the underlying element≤ + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + if v.Kind() != reflect.Struct { + panic("not struct") + } + + return v +} + +// Map converts the given struct to a map[string]interface{}. For more info +// refer to Struct types Map() method. It panics if s's kind is not struct. +func Map(s interface{}) map[string]interface{} { + return New(s).Map() +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func FillMap(s interface{}, out map[string]interface{}) { + New(s).FillMap(out) +} + +// Values converts the given struct to a []interface{}. For more info refer to +// Struct types Values() method. It panics if s's kind is not struct. 
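+//
+// A short illustrative example:
+//
+//	Values(struct{ A, B int }{1, 2}) // []interface{}{1, 2}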
+func Values(s interface{}) []interface{} { + return New(s).Values() +} + +// Fields returns a slice of *Field. For more info refer to Struct types +// Fields() method. It panics if s's kind is not struct. +func Fields(s interface{}) []*Field { + return New(s).Fields() +} + +// Names returns a slice of field names. For more info refer to Struct types +// Names() method. It panics if s's kind is not struct. +func Names(s interface{}) []string { + return New(s).Names() +} + +// IsZero returns true if all fields is equal to a zero value. For more info +// refer to Struct types IsZero() method. It panics if s's kind is not struct. +func IsZero(s interface{}) bool { + return New(s).IsZero() +} + +// HasZero returns true if any field is equal to a zero value. For more info +// refer to Struct types HasZero() method. It panics if s's kind is not struct. +func HasZero(s interface{}) bool { + return New(s).HasZero() +} + +// IsStruct returns true if the given variable is a struct or a pointer to +// struct. +func IsStruct(s interface{}) bool { + v := reflect.ValueOf(s) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // uninitialized zero value of a struct + if v.Kind() == reflect.Invalid { + return false + } + + return v.Kind() == reflect.Struct +} + +// Name returns the structs's type name within its package. It returns an +// empty string for unnamed types. It panics if s's kind is not struct. +func Name(s interface{}) string { + return New(s).Name() +} + +// nested retrieves recursively all types for the given value and returns the +// nested value. +func (s *Struct) nested(val reflect.Value) interface{} { + var finalVal interface{} + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + n := New(val.Interface()) + n.TagName = s.TagName + m := n.Map() + + // do not add the converted value if there are no exported fields, ie: + // time.Time + if len(m) == 0 { + finalVal = val.Interface() + } else { + finalVal = m + } + case reflect.Map: + // get the element type of the map + mapElem := val.Type() + switch val.Type().Kind() { + case reflect.Ptr, reflect.Array, reflect.Map, + reflect.Slice, reflect.Chan: + mapElem = val.Type().Elem() + if mapElem.Kind() == reflect.Ptr { + mapElem = mapElem.Elem() + } + } + + // only iterate over struct types, ie: map[string]StructType, + // map[string][]StructType, + if mapElem.Kind() == reflect.Struct || + (mapElem.Kind() == reflect.Slice && + mapElem.Elem().Kind() == reflect.Struct) { + m := make(map[string]interface{}, val.Len()) + for _, k := range val.MapKeys() { + m[k.String()] = s.nested(val.MapIndex(k)) + } + finalVal = m + break + } + + // TODO(arslan): should this be optional? + finalVal = val.Interface() + case reflect.Slice, reflect.Array: + if val.Type().Kind() == reflect.Interface { + finalVal = val.Interface() + break + } + + // TODO(arslan): should this be optional? + // do not iterate of non struct types, just pass the value. Ie: []int, + // []string, co... We only iterate further if it's a struct. 
+ // i.e []foo or []*foo + if val.Type().Elem().Kind() != reflect.Struct && + !(val.Type().Elem().Kind() == reflect.Ptr && + val.Type().Elem().Elem().Kind() == reflect.Struct) { + finalVal = val.Interface() + break + } + + slices := make([]interface{}, val.Len()) + for x := 0; x < val.Len(); x++ { + slices[x] = s.nested(val.Index(x)) + } + finalVal = slices + default: + finalVal = val.Interface() + } + + return finalVal +} diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go new file mode 100644 index 000000000..136a31eba --- /dev/null +++ b/vendor/github.com/fatih/structs/tags.go @@ -0,0 +1,32 @@ +package structs + +import "strings" + +// tagOptions contains a slice of tag options +type tagOptions []string + +// Has returns true if the given option is available in tagOptions +func (t tagOptions) Has(opt string) bool { + for _, tagOpt := range t { + if tagOpt == opt { + return true + } + } + + return false +} + +// parseTag splits a struct field's tag into its name and a list of options +// which comes after a name. A tag is in the form of: "name,option1,option2". +// The name can be neglectected. +func parseTag(tag string) (string, tagOptions) { + // tag is one of followings: + // "" + // "name" + // "name,opt" + // "name,opt,opt2" + // ",opt" + + res := strings.Split(tag, ",") + return res[0], res[1:] +} diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore new file mode 100644 index 000000000..b4ae623be --- /dev/null +++ b/vendor/github.com/gobwas/glob/.gitignore @@ -0,0 +1,8 @@ +glob.iml +.idea +*.cpu +*.mem +*.test +*.dot +*.png +*.svg diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml new file mode 100644 index 000000000..e8a276826 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.travis.yml @@ -0,0 +1,9 @@ +sudo: false + +language: go + +go: + - 1.5.3 + +script: + - go test -v ./... diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE new file mode 100644 index 000000000..9d4735cad --- /dev/null +++ b/vendor/github.com/gobwas/glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Sergey Kamardin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh new file mode 100644 index 000000000..804cf22e6 --- /dev/null +++ b/vendor/github.com/gobwas/glob/bench.sh @@ -0,0 +1,26 @@ +#! 
/bin/bash + +bench() { + filename="/tmp/$1-$2.bench" + if test -e "${filename}"; + then + echo "Already exists ${filename}" + else + backup=`git rev-parse --abbrev-ref HEAD` + git checkout $1 + echo -n "Creating ${filename}... " + go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 000000000..02e7de80a --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { 
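+			// the right-hand subtree failed to compile; propagate the error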
+ return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return m + } + if m := glueMatchersAsRow(matchers); m != nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) +} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) 
+	}
+
+	if len(next) == len(matchers) {
+		return next
+	}
+
+	return minimizeMatchers(next)
+}
+
+// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree
+func minimizeTree(tree *ast.Node) *ast.Node {
+	switch tree.Kind {
+	case ast.KindAnyOf:
+		return minimizeTreeAnyOf(tree)
+	default:
+		return nil
+	}
+}
+
+// minimizeTreeAnyOf tries to find common children of the given AnyOf node.
+// It searches for common children from the left and from the right;
+// if any common children are found, it returns a new, optimized ast tree,
+// else it returns nil.
+func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
+	if !areOfSameKind(tree.Children, ast.KindPattern) {
+		return nil
+	}
+
+	commonLeft, commonRight := commonChildren(tree.Children)
+	commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
+	if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
+		return nil
+	}
+
+	var result []*ast.Node
+	if commonLeftCount > 0 {
+		result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
+	}
+
+	var anyOf []*ast.Node
+	for _, child := range tree.Children {
+		reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
+		var node *ast.Node
+		if len(reuse) == 0 {
+			// this pattern is completely reduced by the commonLeft and commonRight patterns,
+			// so it becomes nothing
+			node = ast.NewNode(ast.KindNothing, nil)
+		} else {
+			node = ast.NewNode(ast.KindPattern, nil, reuse...)
+		}
+		anyOf = appendIfUnique(anyOf, node)
+	}
+	switch {
+	case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
+		result = append(result, anyOf[0])
+	case len(anyOf) > 1:
+		result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
+	}
+
+	if commonRightCount > 0 {
+		result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
+	}
+
+	return ast.NewNode(ast.KindPattern, nil, result...) 
+} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find node that has least number of children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate max able size for rightCommon slice + // to get ability insert elements in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // will use this to get results as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip least children node + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // disable searching for right common parts, if left part is already overlapping + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, 
fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 000000000..2afde343a --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+// QuoteMeta returns a string that quotes all glob pattern meta characters
+// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
+func QuoteMeta(s string) string {
+	b := make([]byte, 2*len(s))
+
+	// a byte loop is correct because all meta characters are ASCII
+	j := 0
+	for i := 0; i < len(s); i++ {
+		if syntax.Special(s[i]) {
+			b[j] = '\\'
+			j++
+		}
+		b[j] = s[i]
+		j++
+	}
+
+	return string(b[0:j])
+}
diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go
new file mode 100644
index 000000000..514a9a5c4
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+	"fmt"
+	"github.com/gobwas/glob/util/strings"
+)
+
+type Any struct {
+	Separators []rune
+}
+
+func NewAny(s []rune) Any {
+	return Any{s}
+}
+
+func (self Any) Match(s string) bool {
+	return strings.IndexAnyRunes(s, self.Separators) == -1
+}
+
+func (self Any) Index(s string) (int, []int) {
+	found := strings.IndexAnyRunes(s, self.Separators)
+	switch found {
+	case -1:
+	case 0:
+		return 0, segments0
+	default:
+		s = s[:found]
+	}
+
+	segments := acquireSegments(len(s))
+	for i := range s {
+		segments = append(segments, i)
+	}
+	segments = append(segments, len(s))
+
+	return 0, segments
+}
+
+func (self Any) Len() int {
+	return lenNo
+}
+
+func (self Any) String() string {
+	return fmt.Sprintf("<any:![%s]>", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go
new file mode 100644
index 000000000..8e65356cd
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any_of.go
@@ -0,0 +1,82 @@
+package match
+
+import "fmt"
+
+type AnyOf struct {
+	Matchers Matchers
+}
+
+func NewAnyOf(m ...Matcher) AnyOf {
+	return AnyOf{Matchers(m)}
+}
+
+func (self *AnyOf) Add(m Matcher) error {
+	self.Matchers = append(self.Matchers, m)
+	return nil
+}
+
+func (self AnyOf) Match(s string) bool {
+	for _, m := range self.Matchers {
+		if m.Match(s) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (self AnyOf) Index(s string) (int, []int) {
+	index := -1
+
+	segments := acquireSegments(len(s))
+	for _, m := range self.Matchers {
+		idx, seg := m.Index(s)
+		if idx == -1 {
+			continue
+		}
+
+		if index == -1 || idx < index {
+			index = idx
+			segments = append(segments[:0], seg...)
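+			// a strictly earlier match wins: the merged segment set is
+			// restarted from this matcher's segments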
+			continue
+		}
+
+		if idx > index {
+			continue
+		}
+
+		// here idx == index
+		segments = appendMerge(segments, seg)
+	}
+
+	if index == -1 {
+		releaseSegments(segments)
+		return -1, nil
+	}
+
+	return index, segments
+}
+
+func (self AnyOf) Len() (l int) {
+	l = -1
+	for _, m := range self.Matchers {
+		ml := m.Len()
+		switch {
+		case l == -1:
+			l = ml
+			continue
+
+		case ml == -1:
+			return -1
+
+		case l != ml:
+			return -1
+		}
+	}
+
+	return
+}
+
+func (self AnyOf) String() string {
+	return fmt.Sprintf("<any_of:[%s]>", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go
new file mode 100644
index 000000000..a8130e93e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/btree.go
@@ -0,0 +1,146 @@
+package match
+
+import (
+	"fmt"
+	"unicode/utf8"
+)
+
+type BTree struct {
+	Value            Matcher
+	Left             Matcher
+	Right            Matcher
+	ValueLengthRunes int
+	LeftLengthRunes  int
+	RightLengthRunes int
+	LengthRunes      int
+}
+
+func NewBTree(Value, Left, Right Matcher) (tree BTree) {
+	tree.Value = Value
+	tree.Left = Left
+	tree.Right = Right
+
+	lenOk := true
+	if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
+		lenOk = false
+	}
+
+	if Left != nil {
+		if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
+			lenOk = false
+		}
+	}
+
+	if Right != nil {
+		if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
+			lenOk = false
+		}
+	}
+
+	if lenOk {
+		tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
+	} else {
+		tree.LengthRunes = -1
+	}
+
+	return tree
+}
+
+func (self BTree) Len() int {
+	return self.LengthRunes
+}
+
+// todo?
+func (self BTree) Index(s string) (int, []int) {
+	return -1, nil
+}
+
+func (self BTree) Match(s string) bool {
+	inputLen := len(s)
+
+	// LengthRunes, RightLengthRunes and LeftLengthRunes are rune counts for each part;
+	// here we manipulate byte lengths for better optimization,
+	// but these checks still work, because the minimum length of a 1-rune string is 1 byte.
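+	// fail fast: a tree that needs more runes than the input has bytes can never match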
+	if self.LengthRunes != -1 && self.LengthRunes > inputLen {
+		return false
+	}
+
+	// try to cut unnecessary parts
+	// by knowledge of length of right and left part
+	var offset, limit int
+	if self.LeftLengthRunes >= 0 {
+		offset = self.LeftLengthRunes
+	}
+	if self.RightLengthRunes >= 0 {
+		limit = inputLen - self.RightLengthRunes
+	} else {
+		limit = inputLen
+	}
+
+	for offset < limit {
+		// search for matching part in substring
+		index, segments := self.Value.Index(s[offset:limit])
+		if index == -1 {
+			releaseSegments(segments)
+			return false
+		}
+
+		l := s[:offset+index]
+		var left bool
+		if self.Left != nil {
+			left = self.Left.Match(l)
+		} else {
+			left = l == ""
+		}
+
+		if left {
+			for i := len(segments) - 1; i >= 0; i-- {
+				length := segments[i]
+
+				var right bool
+				var r string
+				// if there is no string for the right branch
+				if inputLen <= offset+index+length {
+					r = ""
+				} else {
+					r = s[offset+index+length:]
+				}
+
+				if self.Right != nil {
+					right = self.Right.Match(r)
+				} else {
+					right = r == ""
+				}
+
+				if right {
+					releaseSegments(segments)
+					return true
+				}
+			}
+		}
+
+		_, step := utf8.DecodeRuneInString(s[offset+index:])
+		offset += index + step
+
+		releaseSegments(segments)
+	}
+
+	return false
+}
+
+func (self BTree) String() string {
+	const n string = "<nil>"
+	var l, r string
+	if self.Left == nil {
+		l = n
+	} else {
+		l = self.Left.String()
+	}
+	if self.Right == nil {
+		r = n
+	} else {
+		r = self.Right.String()
+	}
+
+	return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
+}
diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go
new file mode 100644
index 000000000..0998e95b0
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/contains.go
@@ -0,0 +1,58 @@
+package match
+
+import (
+	"fmt"
+	"strings"
+)
+
+type Contains struct {
+	Needle string
+	Not    bool
+}
+
+func NewContains(needle string, not bool) Contains {
+	return Contains{needle, not}
+}
+
+func (self Contains) Match(s string) bool {
+	return strings.Contains(s, self.Needle) != self.Not
+}
+
+func (self Contains) Index(s string) (int, []int) {
+	var offset int
+
+	idx := strings.Index(s, self.Needle)
+
+	if !self.Not {
+		if idx == -1 {
+			return -1, nil
+		}
+
+		offset = idx + len(self.Needle)
+		if len(s) <= offset {
+			return 0, []int{offset}
+		}
+		s = s[offset:]
+	} else if idx != -1 {
+		s = s[:idx]
+	}
+
+	segments := acquireSegments(len(s) + 1)
+	for i := range s {
+		segments = append(segments, offset+i)
+	}
+
+	return 0, append(segments, offset+len(s))
+}
+
+func (self Contains) Len() int {
+	return lenNo
+}
+
+func (self Contains) String() string {
+	var not string
+	if self.Not {
+		not = "!"
+	}
+	return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle)
+}
diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go
new file mode 100644
index 000000000..7c968ee36
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/every_of.go
@@ -0,0 +1,99 @@
+package match
+
+import (
+	"fmt"
+)
+
+type EveryOf struct {
+	Matchers Matchers
+}
+
+func NewEveryOf(m ...Matcher) EveryOf {
+	return EveryOf{Matchers(m)}
+}
+
+func (self *EveryOf) Add(m Matcher) error {
+	self.Matchers = append(self.Matchers, m)
+	return nil
+}
+
+func (self EveryOf) Len() (l int) {
+	for _, m := range self.Matchers {
+		if ml := m.Len(); l > 0 {
+			l += ml
+		} else {
+			return -1
+		}
+	}
+
+	return
+}
+
+func (self EveryOf) Index(s string) (int, []int) {
+	var index int
+	var offset int
+
+	// make `in` with cap as len(s),
+	// cause it is the maximum size of output segments values
+	next := acquireSegments(len(s))
+	current := acquireSegments(len(s))
+
+	sub := s
+	for i, m := range self.Matchers {
+		idx, seg := m.Index(sub)
+		if idx == -1 {
+			releaseSegments(next)
+			releaseSegments(current)
+			return -1, nil
+		}
+
+		if i == 0 {
+			// we use copy here instead of `current = seg`
+			// cause seg is a slice from reusable buffer `in`
+			// and it could be overwritten in next iteration
+			current = append(current, seg...)
+		} else {
+			// clear the next
+			next = next[:0]
+
+			delta := index - (idx + offset)
+			for _, ex := range current {
+				for _, n := range seg {
+					if ex+delta == n {
+						next = append(next, n)
+					}
+				}
+			}
+
+			if len(next) == 0 {
+				releaseSegments(next)
+				releaseSegments(current)
+				return -1, nil
+			}
+
+			current = append(current[:0], next...)
+		}
+
+		index = idx + offset
+		sub = s[index:]
+		offset += idx
+	}
+
+	releaseSegments(next)
+
+	return index, current
+}
+
+func (self EveryOf) Match(s string) bool {
+	for _, m := range self.Matchers {
+		if !m.Match(s) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (self EveryOf) String() string {
+	return fmt.Sprintf("<every_of:[%s]>", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go
new file mode 100644
index 000000000..7fd763ecd
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/list.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+	"fmt"
+	"github.com/gobwas/glob/util/runes"
+	"unicode/utf8"
+)
+
+type List struct {
+	List []rune
+	Not  bool
+}
+
+func NewList(list []rune, not bool) List {
+	return List{list, not}
+}
+
+func (self List) Match(s string) bool {
+	r, w := utf8.DecodeRuneInString(s)
+	if len(s) > w {
+		return false
+	}
+
+	inList := runes.IndexRune(self.List, r) != -1
+	return inList == !self.Not
+}
+
+func (self List) Len() int {
+	return lenOne
+}
+
+func (self List) Index(s string) (int, []int) {
+	for i, r := range s {
+		if self.Not == (runes.IndexRune(self.List, r) == -1) {
+			return i, segmentsByRuneLength[utf8.RuneLen(r)]
+		}
+	}
+
+	return -1, nil
+}
+
+func (self List) String() string {
+	var not string
+	if self.Not {
+		not = "!"
+	}
+
+	return fmt.Sprintf("<list:%s[%s]>", not, string(self.List))
+}
diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go
new file mode 100644
index 000000000..f80e007fb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/match.go
@@ -0,0 +1,81 @@
+package match
+
+// todo common table of rune's length
+
+import (
+	"fmt"
+	"strings"
+)
+
+const lenOne = 1
+const lenZero = 0
+const lenNo = -1
+
+type Matcher interface {
+	Match(string) bool
+	Index(string) (int, []int)
+	Len() int
+	String() string
+}
+
+type Matchers []Matcher
+
+func (m Matchers) String() string {
+	var s []string
+	for _, matcher := range m {
+		s = append(s, fmt.Sprint(matcher))
+	}
+
+	return fmt.Sprintf("%s", strings.Join(s, ","))
+}
+
+// appendMerge merges and sorts given already SORTED and UNIQUE segments.
+func appendMerge(target, sub []int) []int {
+	lt, ls := len(target), len(sub)
+	out := make([]int, 0, lt+ls)
+
+	for x, y := 0, 0; x < lt || y < ls; {
+		if x >= lt {
+			out = append(out, sub[y:]...)
+			break
+		}
+
+		if y >= ls {
+			out = append(out, target[x:]...)
+			break
+		}
+
+		xValue := target[x]
+		yValue := sub[y]
+
+		switch {
+
+		case xValue == yValue:
+			out = append(out, xValue)
+			x++
+			y++
+
+		case xValue < yValue:
+			out = append(out, xValue)
+			x++
+
+		case yValue < xValue:
+			out = append(out, yValue)
+			y++
+
+		}
+	}
+
+	target = append(target[:0], out...)
+
+	return target
+}
+
+func reverseSegments(input []int) {
+	l := len(input)
+	m := l / 2
+
+	for i := 0; i < m; i++ {
+		input[i], input[l-i-1] = input[l-i-1], input[i]
+	}
+}
diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go
new file mode 100644
index 000000000..d72f69eff
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/max.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+	"fmt"
+	"unicode/utf8"
+)
+
+type Max struct {
+	Limit int
+}
+
+func NewMax(l int) Max {
+	return Max{l}
+}
+
+func (self Max) Match(s string) bool {
+	var l int
+	for range s {
+		l += 1
+		if l > self.Limit {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (self Max) Index(s string) (int, []int) {
+	segments := acquireSegments(self.Limit + 1)
+	segments = append(segments, 0)
+	var count int
+	for i, r := range s {
+		count++
+		if count > self.Limit {
+			break
+		}
+		segments = append(segments, i+utf8.RuneLen(r))
+	}
+
+	return 0, segments
+}
+
+func (self Max) Len() int {
+	return lenNo
+}
+
+func (self Max) String() string {
+	return fmt.Sprintf("<max:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go
new file mode 100644
index 000000000..db57ac8eb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/min.go
@@ -0,0 +1,57 @@
+package match
+
+import (
+	"fmt"
+	"unicode/utf8"
+)
+
+type Min struct {
+	Limit int
+}
+
+func NewMin(l int) Min {
+	return Min{l}
+}
+
+func (self Min) Match(s string) bool {
+	var l int
+	for range s {
+		l += 1
+		if l >= self.Limit {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (self Min) Index(s string) (int, []int) {
+	var count int
+
+	c := len(s) - self.Limit + 1
+	if c <= 0 {
+		return -1, nil
+	}
+
+	segments := acquireSegments(c)
+	for i, r := range s {
+		count++
+		if count >= self.Limit {
+			segments = append(segments, i+utf8.RuneLen(r))
+		}
+	}
+
+	if len(segments) == 0 {
+		return -1, nil
+	}
+
+	return 0, segments
+}
+
+func (self Min) Len() int {
+	return lenNo
+}
+
+func (self Min) String() string {
+	return fmt.Sprintf("<min:%d>", self.Limit)
+}
diff --git 
a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go new file mode 100644 index 000000000..0d4ecd36b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/nothing.go @@ -0,0 +1,27 @@ +package match + +import ( + "fmt" +) + +type Nothing struct{} + +func NewNothing() Nothing { + return Nothing{} +} + +func (self Nothing) Match(s string) bool { + return len(s) == 0 +} + +func (self Nothing) Index(s string) (int, []int) { + return 0, segments0 +} + +func (self Nothing) Len() int { + return lenZero +} + +func (self Nothing) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go new file mode 100644 index 000000000..a7347250e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix.go @@ -0,0 +1,50 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type Prefix struct { + Prefix string +} + +func NewPrefix(p string) Prefix { + return Prefix{p} +} + +func (self Prefix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + length := len(self.Prefix) + var sub string + if len(s) > idx+length { + sub = s[idx+length:] + } else { + sub = "" + } + + segments := acquireSegments(len(sub) + 1) + segments = append(segments, length) + for i, r := range sub { + segments = append(segments, length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 000000000..8ee58fe1b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 000000000..8208085a1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go @@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, self.Prefix) + if prefixIdx 
== -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 000000000..ce30245a4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!" 
+ } + return fmt.Sprintf("", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 000000000..4379042e4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 000000000..9ea6f3094 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 000000000..ee6e3954c --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + 
"unicode/utf8" +) + +// single represents ? +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 000000000..85bea8c68 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 000000000..c5106f819 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 000000000..3875950bb --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Super) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 000000000..0a17616d3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ +package match 
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+// raw represents raw string to match
+type Text struct {
+	Str         string
+	RunesLength int
+	BytesLength int
+	Segments    []int
+}
+
+func NewText(s string) Text {
+	return Text{
+		Str:         s,
+		RunesLength: utf8.RuneCountInString(s),
+		BytesLength: len(s),
+		Segments:    []int{len(s)},
+	}
+}
+
+func (self Text) Match(s string) bool {
+	return self.Str == s
+}
+
+func (self Text) Len() int {
+	return self.RunesLength
+}
+
+func (self Text) Index(s string) (int, []int) {
+	index := strings.Index(s, self.Str)
+	if index == -1 {
+		return -1, nil
+	}
+
+	return index, self.Segments
+}
+
+func (self Text) String() string {
+	return fmt.Sprintf("<text:`%s`>", self.Str)
+}
diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md
new file mode 100644
index 000000000..f58144e73
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/readme.md
@@ -0,0 +1,148 @@
+# glob.[go](https://golang.org)
+
+[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
+
+> Go Globbing Library.
+
+## Install
+
+```shell
+go get github.com/gobwas/glob
+```
+
+## Example
+
+```go
+package main
+
+import "github.com/gobwas/glob"
+
+func main() {
+	var g glob.Glob
+
+	// create simple glob
+	g = glob.MustCompile("*.github.com")
+	g.Match("api.github.com") // true
+
+	// quote meta characters and then create simple glob
+	g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
+	g.Match("*.github.com") // true
+
+	// create new glob with set of delimiters as ["."]
+	g = glob.MustCompile("api.*.com", '.')
+	g.Match("api.github.com") // true
+	g.Match("api.gi.hub.com") // false
+
+	// create new glob with set of delimiters as ["."]
+	// but now with super wildcard
+	g = glob.MustCompile("api.**.com", '.')
+	g.Match("api.github.com") // true
+	g.Match("api.gi.hub.com") // true
+
+	// create glob with single symbol wildcard
+	g = glob.MustCompile("?at")
+	g.Match("cat") // true
+	g.Match("fat") // true
+	g.Match("at") // false
+
+	// create glob with single symbol wildcard and delimiters ['f']
+	g = glob.MustCompile("?at", 'f')
+	g.Match("cat") // true
+	g.Match("fat") // false
+	g.Match("at") // false
+
+	// create glob with character-list matchers
+	g = glob.MustCompile("[abc]at")
+	g.Match("cat") // true
+	g.Match("bat") // true
+	g.Match("fat") // false
+	g.Match("at") // false
+
+	// create glob with character-list matchers
+	g = glob.MustCompile("[!abc]at")
+	g.Match("cat") // false
+	g.Match("bat") // false
+	g.Match("fat") // true
+	g.Match("at") // false
+
+	// create glob with character-range matchers
+	g = glob.MustCompile("[a-c]at")
+	g.Match("cat") // true
+	g.Match("bat") // true
+	g.Match("fat") // false
+	g.Match("at") // false
+
+	// create glob with character-range matchers
+	g = glob.MustCompile("[!a-c]at")
+	g.Match("cat") // false
+	g.Match("bat") // false
+	g.Match("fat") // true
+	g.Match("at") // false
+
+	// create glob with pattern-alternatives list
+	g = glob.MustCompile("{cat,bat,[fr]at}")
+	g.Match("cat") // true
+	g.Match("bat") // true
+	g.Match("fat") // true
+	g.Match("rat") // true
+	g.Match("at") // false
+	g.Match("zat") // false
+}
+```
+
+## Performance
+
+This library is designed for compile-once patterns: compilation can take some time, but
+matching strings against a compiled pattern is then much faster than re-parsing the
+pattern on every call.
+
+If you do not reuse the compiled `glob.Glob` object and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower.
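+For example, a minimal sketch of the compile-once pattern (the pattern and inputs are
+borrowed from the benchmarks below; `hostGlob` and `isGoogleURL` are illustrative names):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/gobwas/glob"
+)
+
+// Compile the pattern once, at package initialization,
+// then reuse the compiled matcher on every call.
+var hostGlob = glob.MustCompile("https://*.google.*")
+
+func isGoogleURL(url string) bool {
+	return hostGlob.Match(url)
+}
+
+func main() {
+	fmt.Println(isGoogleURL("https://account.google.com")) // true
+	fmt.Println(isGoogleURL("https://google.com"))         // false
+}
+```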
+
+Run `go test -bench=.` from source root to see the benchmarks:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
+`https://*.google.*` | `https://account.google.com` | `true` | 96
+`https://*.google.*` | `https://google.com` | `false` | 66
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
+`abc*` | `abcdef` | `true` | 8.15
+`abc*` | `af` | `false` | 5.68
+`*def` | `abcdef` | `true` | 8.84
+`*def` | `af` | `false` | 5.74
+`ab*ef` | `abcdef` | `true` | 15.2
+`ab*ef` | `af` | `false` | 10.4
+
+The same cases with the `regexp` package:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
+`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
+`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
+`^abc.*$` | `abcdef` | `true` | 237
+`^abc.*$` | `af` | `false` | 100
+`^.*def$` | `abcdef` | `true` | 464
+`^.*def$` | `af` | `false` | 265
+`^ab.*ef$` | `abcdef` | `true` | 375
+`^ab.*ef$` | `af` | `false` | 145
+
+[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/glob
+[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/glob
+
+## Syntax
+
+Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
+except that `**` is a "super-asterisk" that is not sensitive to separators.
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 000000000..3220a694a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 000000000..429b40943 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, NewNode(KindSingle, nil)) + return parserMain, tree, nil + + case lexer.RangeOpen: + return parserRange, tree, 
nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 000000000..a1c8d1962 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 000000000..2797c4e83 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 000000000..1d168b148 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 000000000..a72355641 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && i >= ln; i-- { + 
for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 000000000..e8ee1920b --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md new file mode 100644 index 000000000..d7b4b8d58 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing # + +Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. + +## New API or feature ## + +I want to speak more about how to add new functions to this package. 
+
+Package `xstrings` is a collection of useful string functions which should be implemented in Go. It's somewhat subjective to say which functions should be included and which should not, so I set up the following rules in order to make the decision as clear and objective as possible.
+
+* Rule 1: Only string algorithms, which take a string as input, can be included.
+* Rule 2: If a function has been implemented in package `strings`, it must not be included.
+* Rule 3: If a function is not language neutral, it must not be included.
+* Rule 4: If a function is a part of the standard library in other languages, it can be included.
+* Rule 5: If a function is quite useful in some famous framework or library, it can be included.
+
+A new function must be discussed in project issues before any code is submitted. If a pull request with new functions is sent without a referenced issue, it will be rejected.
+
+## Pull request ##
+
+Pull requests are always welcome. Just make sure you have run `go fmt` and that all test cases pass before submitting.
+
+If the pull request adds a new API or feature, don't forget to update README.md and add the new API to the function list.
diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE
new file mode 100644
index 000000000..270177259
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Huan Du
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
new file mode 100644
index 000000000..750c3c7eb
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -0,0 +1,117 @@
+# xstrings
+
+[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions)
+[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings)
+[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings)
+[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master)
+
+Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions which are widely used in other languages but absent in the Go package [strings](http://golang.org/pkg/strings).
+
+All functions are well tested and carefully tuned for performance.
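+For a quick taste, here is a minimal usage sketch (the functions and their outputs
+follow the doc comments and the function list below):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/huandu/xstrings"
+)
+
+func main() {
+	// Convert between identifier styles.
+	fmt.Println(xstrings.ToSnakeCase("HTTPServer"))  // http_server
+	fmt.Println(xstrings.ToCamelCase("http_server")) // HttpServer
+
+	// Rune-aware helpers missing from the standard strings package.
+	fmt.Println(xstrings.Len("héllo"))     // 5
+	fmt.Println(xstrings.Reverse("héllo")) // olléh
+}
+```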
+ +## Propose a new function + +Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. + +## Install + +Use `go get` to install this library. + + go get github.com/huandu/xstrings + +## API document + +See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. + +## Function list + +Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. + +Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. + +### Package `xstrings` functions + +_Keep this table sorted by Function in ascending order._ + +| Function | Friends | # | +| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| 
[Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | +| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions + +_Keep this table sorted by Function in ascending order._ + +| Function | Friends | +| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| 
[IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License + +This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 000000000..f427cc84e --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,21 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +const bufferMaxInitGrowSize = 2048 + +// Lazy initialize a buffer. +func allocBuffer(orig, cur string) *stringBuilder { + output := &stringBuilder{} + maxSize := len(orig) * 4 + + // Avoid to reserve too much memory at once. 
+ if maxSize > bufferMaxInitGrowSize { + maxSize = bufferMaxInitGrowSize + } + + output.Grow(maxSize) + output.WriteString(orig[:len(orig)-len(cur)]) + return output +} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go new file mode 100644 index 000000000..151c3151d --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -0,0 +1,590 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "math/rand" + "unicode" + "unicode/utf8" +) + +// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. +// +// Some samples. +// "some_words" => "SomeWords" +// "http_server" => "HttpServer" +// "no_https" => "NoHttps" +// "_complex__case_" => "_Complex_Case_" +// "some words" => "SomeWords" +func ToCamelCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + var r0, r1 rune + var size int + + // leading connector will appear in output. + for len(str) > 0 { + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !isConnector(r0) { + r0 = unicode.ToUpper(r0) + break + } + + buf.WriteRune(r0) + } + + if len(str) == 0 { + // A special case for a string contains only 1 rune. + if size != 0 { + buf.WriteRune(r0) + } + + return buf.String() + } + + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r0) && isConnector(r1) { + buf.WriteRune(r1) + continue + } + + if isConnector(r1) { + r0 = unicode.ToUpper(r0) + } else { + r0 = unicode.ToLower(r0) + buf.WriteRune(r1) + } + } + + buf.WriteRune(r0) + return buf.String() +} + +// ToSnakeCase can convert all upper case characters in a string to +// snake case format. +// +// Some samples. +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http_20x_ok" +// "Duration2m3s" => "duration_2m3s" +// "Bld4Floor3rd" => "bld4_floor_3rd" +func ToSnakeCase(str string) string { + return camelCaseToLowerCase(str, '_') +} + +// ToKebabCase can convert all upper case characters in a string to +// kebab case format. +// +// Some samples. +// "FirstName" => "first-name" +// "HTTPServer" => "http-server" +// "NoHTTPS" => "no-https" +// "GO_PATH" => "go-path" +// "GO PATH" => "go-path" // space is converted to '-'. +// "GO-PATH" => "go-path" // hyphen is converted to '-'. +// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. 
+// "HTTP20xOK" => "http-20x-ok" +// "Duration2m3s" => "duration-2m3s" +// "Bld4Floor3rd" => "bld4-floor-3rd" +func ToKebabCase(str string) string { + return camelCaseToLowerCase(str, '-') +} + +func camelCaseToLowerCase(str string, connector rune) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + wt, word, remaining := nextWord(str) + + for len(remaining) > 0 { + if wt != connectorWord { + toLower(buf, wt, word, connector) + } + + prev := wt + last := word + wt, word, remaining = nextWord(remaining) + + switch prev { + case numberWord: + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != punctWord && wt != connectorWord { + buf.WriteRune(connector) + } + + case connectorWord: + toLower(buf, prev, last, connector) + + case punctWord: + // nothing. + + default: + if wt != numberWord { + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + if len(remaining) == 0 { + break + } + + last := word + wt, word, remaining = nextWord(remaining) + + // consider number as a part of previous word. + // e.g. "Bld4Floor" => "bld4_floor" + if wt != alphabetWord { + toLower(buf, numberWord, last, connector) + + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + // if there are some lower case letters following a number, + // add connector before the number. + // e.g. "HTTP2xx" => "http_2xx" + buf.WriteRune(connector) + toLower(buf, numberWord, last, connector) + + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + } + } + + toLower(buf, wt, word, connector) + return buf.String() +} + +func isConnector(r rune) bool { + return r == '-' || r == '_' || unicode.IsSpace(r) +} + +type wordType int + +const ( + invalidWord wordType = iota + numberWord + upperCaseWord + alphabetWord + connectorWord + punctWord + otherWord +) + +func nextWord(str string) (wt wordType, word, remaining string) { + if len(str) == 0 { + return + } + + var offset int + remaining = str + r, size := nextValidRune(remaining, utf8.RuneError) + offset += size + + if r == utf8.RuneError { + wt = invalidWord + word = str[:offset] + remaining = str[offset:] + return + } + + switch { + case isConnector(r): + wt = connectorWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isConnector(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsPunct(r): + wt = punctWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsUpper(r): + wt = upperCaseWord + remaining = remaining[size:] + + if len(remaining) == 0 { + break + } + + r, size = nextValidRune(remaining, r) + + switch { + case unicode.IsUpper(r): + prevSize := size + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsUpper(r) { + break + } + + prevSize = size + offset += size + remaining = remaining[size:] + } + + // it's a bit complex when dealing with a case like "HTTPStatus". + // it's expected to be splitted into "HTTP" and "Status". 
+ // Therefore "S" should be in remaining instead of word. + if len(remaining) > 0 && isAlphabet(r) { + offset -= prevSize + remaining = str[offset:] + } + + case isAlphabet(r): + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + case isAlphabet(r): + wt = alphabetWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsNumber(r): + wt = numberWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsNumber(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + default: + wt = otherWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + word = str[:offset] + return +} + +func nextValidRune(str string, prev rune) (r rune, size int) { + var sz int + + for len(str) > 0 { + r, sz = utf8.DecodeRuneInString(str) + size += sz + + if r != utf8.RuneError { + return + } + + str = str[sz:] + } + + r = prev + return +} + +func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { + buf.Grow(buf.Len() + len(str)) + + if wt != upperCaseWord && wt != connectorWord { + buf.WriteString(str) + return + } + + for len(str) > 0 { + r, size := utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r) { + buf.WriteRune(connector) + } else if unicode.IsUpper(r) { + buf.WriteRune(unicode.ToLower(r)) + } else { + buf.WriteRune(r) + } + } +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &stringBuilder{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. +func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. 
+func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. +func ShuffleSource(str string, src rand.Source) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + r := rand.New(src) + + for i := len(runes) - 1; i > 0; i-- { + index = r.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// Successor returns the successor to string. +// +// If there is one alphanumeric rune is found in string, increase the rune by 1. +// If increment generates a "carry", the rune to the left of it is incremented. +// This process repeats until there is no carry, adding an additional rune if necessary. +// +// If there is no alphanumeric rune, the rightmost rune will be increased by 1 +// regardless whether the result is a valid rune or not. +// +// Only following characters are alphanumeric. +// * a - z +// * A - Z +// * 0 - 9 +// +// Samples (borrowed from ruby's String#succ document): +// "abcd" => "abce" +// "THX1138" => "THX1139" +// "<>" => "<>" +// "1999zzz" => "2000aaa" +// "ZZZ9999" => "AAAA0000" +// "***" => "**+" +func Successor(str string) string { + if str == "" { + return str + } + + var r rune + var i int + carry := ' ' + runes := []rune(str) + l := len(runes) + lastAlphanumeric := l + + for i = l - 1; i >= 0; i-- { + r = runes[i] + + if ('a' <= r && r <= 'y') || + ('A' <= r && r <= 'Y') || + ('0' <= r && r <= '8') { + runes[i]++ + carry = ' ' + lastAlphanumeric = i + break + } + + switch r { + case 'z': + runes[i] = 'a' + carry = 'a' + lastAlphanumeric = i + + case 'Z': + runes[i] = 'A' + carry = 'A' + lastAlphanumeric = i + + case '9': + runes[i] = '0' + carry = '0' + lastAlphanumeric = i + } + } + + // Needs to add one character for carry. + if i < 0 && carry != ' ' { + buf := &stringBuilder{} + buf.Grow(l + 4) // Reserve enough space for write. + + if lastAlphanumeric != 0 { + buf.WriteString(str[:lastAlphanumeric]) + } + + buf.WriteRune(carry) + + for _, r = range runes[lastAlphanumeric:] { + buf.WriteRune(r) + } + + return buf.String() + } + + // No alphanumeric character. Simply increase last rune's value. + if lastAlphanumeric == l { + runes[l-1]++ + } + + return string(runes) +} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go new file mode 100644 index 000000000..f96e38703 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/count.go @@ -0,0 +1,120 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +// Len returns str's utf8 rune length. +func Len(str string) int { + return utf8.RuneCountInString(str) +} + +// WordCount returns number of words in a string. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordCount(str string) int { + var r rune + var size, n int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + n++ + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. 
+
+		default:
+			inWord = false
+		}
+
+		str = str[size:]
+	}
+
+	return n
+}
+
+const minCJKCharacter = '\u3400'
+
+// Checks whether r is a letter but not a CJK character.
+func isAlphabet(r rune) bool {
+	if !unicode.IsLetter(r) {
+		return false
+	}
+
+	switch {
+	// Quick check for non-CJK character.
+	case r < minCJKCharacter:
+		return true
+
+	// Common CJK characters.
+	case r >= '\u4E00' && r <= '\u9FCC':
+		return false
+
+	// Rare CJK characters.
+	case r >= '\u3400' && r <= '\u4D85':
+		return false
+
+	// Rare and historic CJK characters.
+	case r >= '\U00020000' && r <= '\U0002B81D':
+		return false
+	}
+
+	return true
+}
+
+// Width returns string width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func Width(str string) int {
+	var r rune
+	var size, n int
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+		n += RuneWidth(r)
+		str = str[size:]
+	}
+
+	return n
+}
+
+// RuneWidth returns character width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func RuneWidth(r rune) int {
+	switch {
+	case r == utf8.RuneError || r < '\x20':
+		return 0
+
+	case '\x20' <= r && r < '\u2000':
+		return 1
+
+	case '\u2000' <= r && r < '\uFF61':
+		return 2
+
+	case '\uFF61' <= r && r < '\uFFA0':
+		return 1
+
+	case '\uFFA0' <= r:
+		return 2
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go
new file mode 100644
index 000000000..1a6ef069f
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// Package xstrings provides string algorithms which are useful but not included in the `strings` package.
+// See project home page for details. https://github.com/huandu/xstrings
+//
+// Package xstrings assumes all strings are encoded in utf8.
+package xstrings
diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go
new file mode 100644
index 000000000..8cd76c525
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/format.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+	"unicode/utf8"
+)
+
+// ExpandTabs expands tab ('\t') runes in str to one or more spaces depending on
+// current column and tabSize.
+// The column number is reset to zero after each newline ('\n') occurring in str.
+//
+// ExpandTabs uses RuneWidth to decide rune's width.
+// For example, CJK characters will be treated as two characters.
+//
+// If tabSize <= 0, ExpandTabs panics with an error.
+// +// Samples: +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *stringBuilder + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteRune(' ') + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. 
+// +// Samples: +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *stringBuilder, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 000000000..64075f9bb --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,216 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. +// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. +func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", Partition returns +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by last instance of sep into three parts. +// The return value is a slice of strings with head, match and tail. 
+// +// If str contains sep, for example "hello" and "l", LastPartition returns +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. +// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. +func Scrub(str, repl string) string { + var buf *stringBuilder + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &stringBuilder{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go new file mode 100644 index 000000000..bb0919d32 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder.go @@ -0,0 +1,7 @@ +//+build go1.10 + +package xstrings + +import "strings" + +type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go new file mode 100644 index 000000000..dac389d13 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go @@ -0,0 +1,9 @@ +//+build !go1.10 + +package xstrings + +import "bytes" + +type stringBuilder struct { + bytes.Buffer +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 000000000..42e694fb1 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,546 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. 
+ FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. + runeMap runeMap // Rune map for translation. + ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. +func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. + if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. + if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. + if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered + // as a single rune. 
+				if fromStart == fromEnd {
+					singleRunes = tr.addRune(fromStart, toStart, singleRunes)
+					updateRange()
+				}
+
+				continue
+			}
+
+			fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes)
+			updateRange()
+			break
+		}
+
+		if fromStart == fromEnd {
+			fromEnd = utf8.RuneError
+			continue
+		}
+
+		_, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes)
+		fromEnd = utf8.RuneError
+	}
+
+	if fromEnd != utf8.RuneError {
+		tr.addRune(fromEnd, toStart, singleRunes)
+	}
+
+	tr.reverted = reverted
+	tr.mappedRune = -1
+	tr.hasPattern = true
+
+	// Translate RuneError only if in deletion or reverted mode.
+	if deletion || reverted {
+		tr.mappedRune = toStart
+	}
+
+	return tr
+}
+
+func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune {
+	if from <= unicode.MaxASCII {
+		if tr.quickDict == nil {
+			tr.quickDict = &runeDict{}
+		}
+
+		tr.quickDict.Dict[from] = to
+	} else {
+		if tr.runeMap == nil {
+			tr.runeMap = make(runeMap)
+		}
+
+		tr.runeMap[from] = to
+	}
+
+	singleRunes = append(singleRunes, from)
+	return singleRunes
+}
+
+func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) {
+	var r rune
+	var rrm *runeRangeMap
+
+	if fromLo < fromHi {
+		rrm = &runeRangeMap{
+			FromLo: fromLo,
+			FromHi: fromHi,
+			ToLo:   toLo,
+			ToHi:   toHi,
+		}
+	} else {
+		rrm = &runeRangeMap{
+			FromLo: fromHi,
+			FromHi: fromLo,
+			ToLo:   toHi,
+			ToHi:   toLo,
+		}
+	}
+
+	// If any single rune conflicts with this rune range, clear the single rune record.
+	for _, r = range singleRunes {
+		if rrm.FromLo <= r && r <= rrm.FromHi {
+			if r <= unicode.MaxASCII {
+				tr.quickDict.Dict[r] = 0
+			} else {
+				delete(tr.runeMap, r)
+			}
+		}
+	}
+
+	tr.ranges = append(tr.ranges, rrm)
+	return fromHi, toHi
+}
+
+func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) {
+	var r rune
+	var size int
+
+	remaining = str
+	escaping := false
+	isRange := false
+
+	for len(remaining) > 0 {
+		r, size = utf8.DecodeRuneInString(remaining)
+		remaining = remaining[size:]
+
+		// Parse special characters.
+		if !escaping {
+			if r == '\\' {
+				escaping = true
+				continue
+			}
+
+			if r == '-' {
+				// Ignore a dash at the beginning of the string.
+				if last == utf8.RuneError {
+					continue
+				}
+
+				start = last
+				isRange = true
+				continue
+			}
+		}
+
+		escaping = false
+
+		if last != utf8.RuneError {
+			// This is a range whose start and end are the same.
+			// Consider it as a normal character.
+			if isRange && last == r {
+				isRange = false
+				continue
+			}
+
+			start = last
+			end = r
+
+			if isRange {
+				if start < end {
+					rangeStep = 1
+				} else {
+					rangeStep = -1
+				}
+			}
+
+			return
+		}
+
+		last = r
+	}
+
+	start = last
+	end = utf8.RuneError
+	return
+}
+
+// Translate str with a from/to pattern pair.
+//
+// See comment in Translate function for usage and samples.
+func (tr *Translator) Translate(str string) string {
+	if !tr.hasPattern || str == "" {
+		return str
+	}
+
+	var r rune
+	var size int
+	var needTr bool
+
+	orig := str
+
+	var output *stringBuilder
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+		r, needTr = tr.TranslateRune(r)
+
+		if needTr && output == nil {
+			output = allocBuffer(orig, str)
+		}
+
+		if r != utf8.RuneError && output != nil {
+			output.WriteRune(r)
+		}
+
+		str = str[size:]
+	}
+
+	// No character is translated.
+	if output == nil {
+		return orig
+	}
+
+	return output.String()
+}
+
+// TranslateRune returns the translated rune and true if r matches the from pattern.
+// If r doesn't match the pattern, original r is returned and translated is false. +func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has one pattern at least. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. Pattern is defined as following. +// +// * Special characters +// * '-' means a range of runes, e.g. +// * "a-z" means all characters from 'a' to 'z' inclusive; +// * "z-a" means all characters from 'z' to 'a' inclusive. +// * '^' as first character means a set of all runes excepted listed, e.g. +// * "^a-z" means all characters except 'a' to 'z' inclusive. +// * '\' escapes special characters. +// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. +// +// Translate will try to find a 1:1 mapping from from to to. +// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. +// +// Samples: +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Delete("hello", "aeiou") => "hll" +// Delete("hello", "a-k") => "llo" +// Delete("hello", "^a-k") => "he" +func Delete(str, pattern string) string { + tr := NewTranslator(pattern, "") + return tr.Translate(str) +} + +// Count how many runes in str match the pattern. +// Pattern is defined in Translate function. 
+// +// Samples: +// Count("hello", "aeiou") => 3 +// Count("hello", "a-k") => 3 +// Count("hello", "^a-k") => 2 +func Count(str, pattern string) int { + if pattern == "" || str == "" { + return 0 + } + + var r rune + var size int + var matched bool + + tr := NewTranslator(pattern, "") + cnt := 0 + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if _, matched = tr.TranslateRune(r); matched { + cnt++ + } + } + + return cnt +} + +// Squeeze deletes adjacent repeated runes in str. +// If pattern is not empty, only runes matching the pattern will be squeezed. +// +// Samples: +// Squeeze("hello", "") => "helo" +// Squeeze("hello", "m-z") => "hello" +// Squeeze("hello world", " ") => "hello world" +func Squeeze(str, pattern string) string { + var last, r rune + var size int + var skipSqueeze, matched bool + var tr *Translator + var output *stringBuilder + + orig := str + last = -1 + + if len(pattern) > 0 { + tr = NewTranslator(pattern, "") + } + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + // Need to squeeze the str. + if last == r && !skipSqueeze { + if tr != nil { + if _, matched = tr.TranslateRune(r); !matched { + skipSqueeze = true + } + } + + if output == nil { + output = allocBuffer(orig, str) + } + + if skipSqueeze { + output.WriteRune(r) + } + } else { + if output != nil { + output.WriteRune(r) + } + + last = r + skipSqueeze = false + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 000000000..f0fbd2e5c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. 
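As a minimal sketch of that deep-copy behavior (an illustration assuming only the `Copy` entry point this vendored package exports):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	original := map[string]interface{}{
		"tags": []string{"a", "b"},
	}

	// Copy walks the value and duplicates nested reference values too.
	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}

	// Mutating the copy leaves the original untouched, because the
	// nested slice was deep copied rather than aliased.
	dup.(map[string]interface{})["tags"].([]string)[0] = "changed"
	fmt.Println(original["tags"]) // [a b]
}
```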
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/copystructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
+
+The `Copy` function has examples associated with it there.
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644
index 000000000..db6a6aa1a
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+	"reflect"
+	"time"
+)
+
+func init() {
+	Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+	// Just... copy it.
+	return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 000000000..8089e6670
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,631 @@
+package copystructure
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mitchellh/reflectwalk"
+)
+
+const tagKey = "copy"
+
+// Copy returns a deep copy of v.
+//
+// Copy is unable to copy unexported fields in a struct (lowercase field names).
+// Unexported fields can't be reflected by the Go runtime and therefore
+// copystructure can't perform any data copies.
+//
+// For structs, copy behavior can be controlled with struct tags. For example:
+//
+//   struct {
+//     Name string
+//     Data *bytes.Buffer `copy:"shallow"`
+//   }
+//
+// The available tag values are:
+//
+// * "ignore" - The field will be ignored, effectively resulting in it being
+//   assigned the zero value in the copy.
+//
+// * "shallow" - The field will be shallow copied. This means that reference
+//   values such as pointers, maps, slices, etc. will be directly assigned
+//   versus deep copied.
+//
+func Copy(v interface{}) (interface{}, error) {
+	return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// ShallowCopiers is a map of pointer types that behave specially
+// when they are copied. If a type is found in this map while deep
+// copying, the pointer value will be shallow copied and not walked
+// into.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value
+// with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
+func Must(v interface{}, err error) interface{} {
+	if err != nil {
+		panic("copy error: " + err.Error())
+	}
+
+	return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+	// Lock any types that are a sync.Locker and are not a mutex while copying.
+	// If there is an RLocker method, use that to get the sync.Locker.
+	Lock bool
+
+	// Copiers is a map of types associated with a CopierFunc. Use the global
+	// Copiers map if this is nil.
+	Copiers map[reflect.Type]CopierFunc
+
+	// ShallowCopiers is a map of pointer types that are shallow copied
+	// no matter where they are encountered. Use the
+	// global ShallowCopiers if this is nil.
+	ShallowCopiers map[reflect.Type]struct{}
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+	if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+		return nil, errPointerRequired
+	}
+
+	w := new(walker)
+	if c.Lock {
+		w.useLocks = true
+	}
+
+	if c.Copiers == nil {
+		c.Copiers = Copiers
+	}
+	w.copiers = c.Copiers
+
+	if c.ShallowCopiers == nil {
+		c.ShallowCopiers = ShallowCopiers
+	}
+	w.shallowCopiers = c.ShallowCopiers
+
+	err := reflectwalk.Walk(v, w)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the result. If the result is nil, then we want to turn it
+	// into a typed nil if we can.
+	result := w.Result
+	if result == nil {
+		val := reflect.ValueOf(v)
+		result = reflect.Indirect(reflect.New(val.Type())).Interface()
+	}
+
+	return result, nil
+}
+
+// Return the key used to index interface types we've seen. Store the number
+// of pointers in the upper 32 bits, and the depth in the lower 32 bits. This is
+// easy to calculate, easy to match a key with our current depth, and we don't
+// need to deal with initializing and cleaning up nested maps or slices.
+func ifaceKey(pointers, depth int) uint64 {
+	return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+	Result interface{}
+
+	copiers        map[reflect.Type]CopierFunc
+	shallowCopiers map[reflect.Type]struct{}
+	depth          int
+	ignoreDepth    int
+	vals           []reflect.Value
+	cs             []reflect.Value
+
+	// This stores the number of pointers we've walked over, indexed by depth.
+	ps []int
+
+	// If an interface is indirected by a pointer, we need to know the type of
+	// interface to create when creating the new value. Store the interface
+	// types here, indexed by both the walk depth and the number of pointers
+	// already seen at that depth. Use ifaceKey to calculate the proper uint64
+	// value.
+	ifaceTypes map[uint64]reflect.Type
+
+	// any locks we've taken, indexed by depth
+	locks []sync.Locker
+	// take locks while walking the structure
+	useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+	w.depth++
+
+	// ensure we have enough elements to index via w.depth
+	for w.depth >= len(w.locks) {
+		w.locks = append(w.locks, nil)
+	}
+
+	for len(w.ps) < w.depth+1 {
+		w.ps = append(w.ps, 0)
+	}
+
+	return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+	locker := w.locks[w.depth]
+	w.locks[w.depth] = nil
+	if locker != nil {
+		defer locker.Unlock()
+	}
+
+	// clear out pointers and interfaces as we exit the stack
+	w.ps[w.depth] = 0
+
+	for k := range w.ifaceTypes {
+		mask := uint64(^uint32(0))
+		if k&mask == uint64(w.depth) {
+			delete(w.ifaceTypes, k)
+		}
+	}
+
+	w.depth--
+	if w.ignoreDepth > w.depth {
+		w.ignoreDepth = 0
+	}
+
+	if w.ignoring() {
+		return nil
+	}
+
+	switch l {
+	case reflectwalk.Array:
+		fallthrough
+	case reflectwalk.Map:
+		fallthrough
+	case reflectwalk.Slice:
+		w.replacePointerMaybe()
+
+		// Pop map off our container
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		// Pop off the key and value
+		mv := w.valPop()
+		mk := w.valPop()
+		m := w.cs[len(w.cs)-1]
+
+		// If mv is the zero value, SetMapIndex deletes the key from the map,
+		// or in this case never adds it. We need to create a properly typed
+		// zero value so that this key can be set.
+		if !mv.IsValid() {
+			mv = reflect.Zero(m.Elem().Type().Elem())
+		}
+		m.Elem().SetMapIndex(mk, mv)
+	case reflectwalk.ArrayElem:
+		// Pop off the value and the index and set it on the array
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			a := w.cs[len(w.cs)-1]
+			ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+			if ae.CanSet() {
+				ae.Set(v)
+			}
+		}
+	case reflectwalk.SliceElem:
+		// Pop off the value and the index and set it on the slice
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			se := s.Elem().Index(i)
+			if se.CanSet() {
+				se.Set(v)
+			}
+		}
+	case reflectwalk.Struct:
+		w.replacePointerMaybe()
+
+		// Remove the struct from the container stack
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.StructField:
+		// Pop off the value and the field
+		v := w.valPop()
+		f := w.valPop().Interface().(reflect.StructField)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			sf := reflect.Indirect(s).FieldByName(f.Name)
+
+			if sf.CanSet() {
+				sf.Set(v)
+			}
+		}
+	case reflectwalk.WalkLoc:
+		// Clear out the slices for GC
+		w.cs = nil
+		w.vals = nil
+	}
+
+	return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(m)
+
+	// Create the map. If the map itself is nil, then just make a nil map
+	var newMap reflect.Value
+	if m.IsNil() {
+		newMap = reflect.New(m.Type())
+	} else {
+		newMap = wrapPtr(reflect.MakeMap(m.Type()))
+	}
+
+	w.cs = append(w.cs, newMap)
+	w.valPush(newMap)
+	return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+	return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+	if v {
+		w.ps[w.depth]++
+	}
+	return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+	if v {
+		w.ps[w.depth]--
+	}
+	return nil
+}
+
+func (w *walker) Pointer(v reflect.Value) error {
+	if _, ok := w.shallowCopiers[v.Type()]; ok {
+		// Shallow copy this value. Use the same logic as primitive, then
+		// return skip.
+ if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := w.copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. 
+ if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. 
+ if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000..ac82cd2e1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
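As a minimal sketch of how such a walker plugs in (assuming only the `Walk` function and the `PrimitiveWalker` interface defined in this vendored package), here is a walker that collects every string found anywhere in a value:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCollector implements reflectwalk.PrimitiveWalker and records
// every string it encounters while walking a value.
type stringCollector struct {
	found []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
	// Values stored in an interface{} arrive with Kind Interface;
	// unwrap them before checking for a string.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		c.found = append(c.found, v.String())
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "ocm",
		"refs": []string{"a", "b"},
	}

	c := &stringCollector{}
	if err := reflectwalk.Walk(data, c); err != nil {
		panic(err)
	}

	// Map keys are walked too, so this prints keys, values, and slice
	// elements; ordering depends on map iteration order.
	fmt.Println(c.found)
}
```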
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000..6a7f17611 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000..70760cf4c --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000..7fee7b050 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,420 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) 
+type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = 
walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore new file mode 100644 index 000000000..8a43ce9d7 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.gitignore @@ -0,0 +1,6 @@ +.git +*.swp + +# IntelliJ +.idea/ +*.iml diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml new file mode 100644 index 000000000..55d42b289 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.7.x + - 1.12.x + - 1.13.x + - tip + +install: + - go build . + +script: + - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md new file mode 100644 index 000000000..01ba02feb --- /dev/null +++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md @@ -0,0 +1,19 @@ +## Decimal v1.2.0 + +#### BREAKING +- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) + +#### FEATURES +- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) +- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) +- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) + +#### ENHANCEMENTS +- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) +- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) +- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) +- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) + +#### BUGFIXES +- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) +- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE new file mode 100644 index 000000000..ad2148aaf --- /dev/null +++ b/vendor/github.com/shopspring/decimal/LICENSE @@ -0,0 +1,45 @@ +The MIT License (MIT) + +Copyright (c) 2015 Spring, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +- Based on https://github.com/oguzbilgic/fpd, which has the following license: +""" +The MIT License (MIT) + +Copyright (c) 2013 Oguz Bilgic + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md new file mode 100644 index 000000000..b70f90159 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/README.md @@ -0,0 +1,130 @@ +# decimal + +[![Build Status](https://travis-ci.org/shopspring/decimal.png?branch=master)](https://travis-ci.org/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) + +Arbitrary-precision fixed-point decimal numbers in go. + +_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. + +## Features + + * The zero-value is 0, and is safe to use without initialization + * Addition, subtraction, multiplication with no loss of precision + * Division with specified precision + * Database/sql serialization/deserialization + * JSON and XML serialization/deserialization + +## Install + +Run `go get github.com/shopspring/decimal` + +## Requirements + +Decimal library requires Go version `>=1.7` + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/shopspring/decimal" +) + +func main() { + price, err := decimal.NewFromString("136.02") + if err != nil { + panic(err) + } + + quantity := decimal.NewFromInt(3) + + fee, _ := decimal.NewFromString(".035") + taxRate, _ := decimal.NewFromString(".08875") + + subtotal := price.Mul(quantity) + + preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) + + total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) + + fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 + fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 + fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 + fmt.Println("Total:", total) // Total: 459.824961375 + fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 +} +``` + +## Documentation + +http://godoc.org/github.com/shopspring/decimal + +## Production Usage + +* [Spring](https://shopspring.com/), since August 14, 2014. +* If you are using this in production, please let us know! + +## FAQ + +#### Why don't you just use float64? 
+ +Because float64 (or any binary floating point type, actually) can't represent +numbers such as `0.1` exactly. + +Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that +it prints out `10`, but it actually prints `9.999999999999831`. Over time, +these small errors can really add up! + +#### Why don't you just use big.Rat? + +big.Rat is fine for representing rational numbers, but Decimal is better for +representing money. Why? Here's a (contrived) example: + +Let's say you use big.Rat, and you have two numbers, x and y, both +representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one +out, the string output has to stop somewhere (let's say it stops at 3 decimal +digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did +the other 0.001 go? + +Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE + +With Decimal, the strings being printed out represent the number exactly. So, +if you have `x = y = 1/3` (with precision 3), they will actually be equal to +0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is +unaccounted for! + +You still have to be careful. If you want to split a number `N` 3 ways, you +can't just send `N/3` to three different people. You have to pick one to send +`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. + +But, it is much easier to be careful with Decimal than with big.Rat. + +#### Why isn't the API similar to big.Int's? + +big.Int's API is built to reduce the number of memory allocations for maximal +performance. This makes sense for its use-case, but the trade-off is that the +API is awkward and easy to misuse. + +For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A +developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This +modifies `a` and sets `z` as an alias for `a`, which they might not expect. It +also modifies any other aliases to `a`. + +Here's an example of the subtle bugs you can introduce with big.Int's API: +https://play.golang.org/p/x2R_78pa8r + +In contrast, it's difficult to make such mistakes with decimal. Decimals +behave like other go numbers types: even though `a = b` will not deep copy +`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods +return new Decimals and do not modify the originals. The downside is that +this causes extra allocations, so Decimal is less performant. My assumption +is that if you're using Decimals, you probably care more about correctness +than performance. + +## License + +The MIT License (MIT) + +This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go new file mode 100644 index 000000000..9958d6902 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal-go.go @@ -0,0 +1,415 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. 
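+//
+// An illustrative note (not part of the upstream source): a value is held
+// as 0.d[0:nd] * 10^dp, so 12.34 is stored as the digits "1234" with nd=4
+// and dp=2, and the sign kept in neg.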
+
+package decimal
+
+type decimal struct {
+ d [800]byte // digits, big-endian representation
+ nd int // number of digits used
+ dp int // decimal point
+ neg bool // negative flag
+ trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+ n := 10 + a.nd
+ if a.dp > 0 {
+ n += a.dp
+ }
+ if a.dp < 0 {
+ n += -a.dp
+ }
+
+ buf := make([]byte, n)
+ w := 0
+ switch {
+ case a.nd == 0:
+ return "0"
+
+ case a.dp <= 0:
+ // zeros fill space between decimal point and digits
+ buf[w] = '0'
+ w++
+ buf[w] = '.'
+ w++
+ w += digitZero(buf[w : w+-a.dp])
+ w += copy(buf[w:], a.d[0:a.nd])
+
+ case a.dp < a.nd:
+ // decimal point in middle of digits
+ w += copy(buf[w:], a.d[0:a.dp])
+ buf[w] = '.'
+ w++
+ w += copy(buf[w:], a.d[a.dp:a.nd])
+
+ default:
+ // zeros fill space between digits and decimal point
+ w += copy(buf[w:], a.d[0:a.nd])
+ w += digitZero(buf[w : w+a.dp-a.nd])
+ }
+ return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+ for i := range dst {
+ dst[i] = '0'
+ }
+ return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+ for a.nd > 0 && a.d[a.nd-1] == '0' {
+ a.nd--
+ }
+ if a.nd == 0 {
+ a.dp = 0
+ }
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+ var buf [24]byte
+
+ // Write reversed decimal in buf.
+ n := 0
+ for v > 0 {
+ v1 := v / 10
+ v -= 10 * v1
+ buf[n] = byte(v + '0')
+ n++
+ v = v1
+ }
+
+ // Reverse again to produce forward decimal in a.d.
+ a.nd = 0
+ for n--; n >= 0; n-- {
+ a.d[a.nd] = buf[n]
+ a.nd++
+ }
+ a.dp = a.nd
+ trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// A uint has 32 or 64 bits, and we have to be able to accommodate 9<<k.
+const uintSize = 32 << (^uint(0) >> 63)
+const maxShift = uintSize - 4
+
+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+ r := 0 // read pointer
+ w := 0 // write pointer
+
+ // Pick up enough leading digits to cover first shift.
+ var n uint
+ for ; n>>k == 0; r++ {
+ if r >= a.nd {
+ if n == 0 {
+ // a == 0; shouldn't get here, but handle anyway.
+ a.nd = 0
+ return
+ }
+ for n>>k == 0 {
+ n = n * 10
+ r++
+ }
+ break
+ }
+ c := uint(a.d[r])
+ n = n*10 + c - '0'
+ }
+ a.dp -= r - 1
+
+ var mask uint = (1 << k) - 1
+
+ // Pick up a digit, put down a digit.
+ for ; r < a.nd; r++ {
+ c := uint(a.d[r])
+ dig := n >> k
+ n &= mask
+ a.d[w] = byte(dig + '0')
+ w++
+ n = n*10 + c - '0'
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ dig := n >> k
+ n &= mask
+ if w < len(a.d) {
+ a.d[w] = byte(dig + '0')
+ w++
+ } else if dig > 0 {
+ a.trunc = true
+ }
+ n = n * 10
+ }
+
+ a.nd = w
+ trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+ delta int // number of new digits
+ cutoff string // minus one digit if original < a.
+}
+
+var leftcheats = []leftCheat{
+ // Leading digits of 1/2^i = 5^i.
+ // 5^23 is not an exact 64-bit floating point number,
+ // so have to use bc for the math.
+ // Go up to 60 to be large enough for 32bit and 64bit platforms.
+ /* + seq 60 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\t{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\t{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, "7450580596923828125"}, // * 134217728 + {9, "37252902984619140625"}, // * 268435456 + {9, "186264514923095703125"}, // * 536870912 + {10, "931322574615478515625"}, // * 1073741824 + {10, "4656612873077392578125"}, // * 2147483648 + {10, "23283064365386962890625"}, // * 4294967296 + {10, "116415321826934814453125"}, // * 8589934592 + {11, "582076609134674072265625"}, // * 17179869184 + {11, "2910383045673370361328125"}, // * 34359738368 + {11, "14551915228366851806640625"}, // * 68719476736 + {12, "72759576141834259033203125"}, // * 137438953472 + {12, "363797880709171295166015625"}, // * 274877906944 + {12, "1818989403545856475830078125"}, // * 549755813888 + {13, "9094947017729282379150390625"}, // * 1099511627776 + {13, "45474735088646411895751953125"}, // * 2199023255552 + {13, "227373675443232059478759765625"}, // * 4398046511104 + {13, "1136868377216160297393798828125"}, // * 8796093022208 + {14, "5684341886080801486968994140625"}, // * 17592186044416 + {14, "28421709430404007434844970703125"}, // * 35184372088832 + {14, "142108547152020037174224853515625"}, // * 70368744177664 + {15, "710542735760100185871124267578125"}, // * 140737488355328 + {15, "3552713678800500929355621337890625"}, // * 281474976710656 + {15, "17763568394002504646778106689453125"}, // * 562949953421312 + {16, "88817841970012523233890533447265625"}, // * 1125899906842624 + {16, "444089209850062616169452667236328125"}, // * 2251799813685248 + {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 + {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 + {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 + {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 + {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 + {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 + {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 + {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488 + {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. 
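+//
+// An illustrative sketch (values assumed, not from the upstream docs):
+// shifting the digits "625" left by k=4 multiplies by 16 and yields 10000,
+// adding exactly the two digits that leftcheats[4] = {2, "625"} predicts.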
+func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + + // Pick up a digit, put down a digit. + var n uint + for r--; r >= 0; r-- { + n += (uint(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits. + for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? +func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. +func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). +func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). +func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go new file mode 100644 index 000000000..801c1a045 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal.go @@ -0,0 +1,1477 @@ +// Package decimal implements an arbitrary precision fixed-point decimal. +// +// The zero-value of a Decimal is 0, as you would expect. +// +// The best way to create a new Decimal is to use decimal.NewFromString, ex: +// +// n, err := decimal.NewFromString("-123.4567") +// n.String() // output: "-123.4567" +// +// To use Decimal as part of a struct: +// +// type Struct struct { +// Number Decimal +// } +// +// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
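+//
+// A usage sketch (an illustration, not from the upstream docs; assumes
+// encoding/json): by default a Decimal marshals to JSON as a quoted string:
+//
+// b, _ := json.Marshal(struct{ N Decimal }{RequireFromString("1.23")})
+// // string(b) == "{\"N\":\"1.23\"}"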
+package decimal + +import ( + "database/sql/driver" + "encoding/binary" + "fmt" + "math" + "math/big" + "strconv" + "strings" +) + +// DivisionPrecision is the number of decimal places in the result when it +// doesn't divide exactly. +// +// Example: +// +// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d1.String() // output: "0.6666666666666667" +// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) +// d2.String() // output: "0.0000666666666667" +// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) +// d3.String() // output: "6666.6666666666666667" +// decimal.DivisionPrecision = 3 +// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d4.String() // output: "0.667" +// +var DivisionPrecision = 16 + +// MarshalJSONWithoutQuotes should be set to true if you want the decimal to +// be JSON marshaled as a number, instead of as a string. +// WARNING: this is dangerous for decimals with many digits, since many JSON +// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 +// double-precision floating point numbers, which means you can potentially +// silently lose precision. +var MarshalJSONWithoutQuotes = false + +// Zero constant, to make computations faster. +// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. +var Zero = New(0, 1) + +var zeroInt = big.NewInt(0) +var oneInt = big.NewInt(1) +var twoInt = big.NewInt(2) +var fourInt = big.NewInt(4) +var fiveInt = big.NewInt(5) +var tenInt = big.NewInt(10) +var twentyInt = big.NewInt(20) + +// Decimal represents a fixed-point decimal. It is immutable. +// number = value * 10 ^ exp +type Decimal struct { + value *big.Int + + // NOTE(vadim): this must be an int32, because we cast it to float64 during + // calculations. If exp is 64 bit, we might lose precision. + // If we cared about being able to represent every possible decimal, we + // could make exp a *big.Int but it would hurt performance and numbers + // like that are unrealistic. + exp int32 +} + +// New returns a new fixed-point decimal, value * 10 ^ exp. +func New(value int64, exp int32) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: exp, + } +} + +// NewFromInt converts a int64 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt(value int64) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: 0, + } +} + +// NewFromInt32 converts a int32 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt32(value int32) Decimal { + return Decimal{ + value: big.NewInt(int64(value)), + exp: 0, + } +} + +// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp +func NewFromBigInt(value *big.Int, exp int32) Decimal { + return Decimal{ + value: big.NewInt(0).Set(value), + exp: exp, + } +} + +// NewFromString returns a new Decimal from a string representation. +// Trailing zeroes are not trimmed. 
+// +// Example: +// +// d, err := NewFromString("-123.45") +// d2, err := NewFromString(".0001") +// d3, err := NewFromString("1.47000") +// +func NewFromString(value string) (Decimal, error) { + originalInput := value + var intString string + var exp int64 + + // Check if number is using scientific notation + eIndex := strings.IndexAny(value, "Ee") + if eIndex != -1 { + expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) + } + return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) + } + value = value[:eIndex] + exp = expInt + } + + parts := strings.Split(value, ".") + if len(parts) == 1 { + // There is no decimal point, we can just parse the original string as + // an int + intString = value + } else if len(parts) == 2 { + intString = parts[0] + parts[1] + expInt := -len(parts[1]) + exp += int64(expInt) + } else { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) + } + + dValue := new(big.Int) + _, ok := dValue.SetString(intString, 10) + if !ok { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + + if exp < math.MinInt32 || exp > math.MaxInt32 { + // NOTE(vadim): I doubt a string could realistically be this long + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) + } + + return Decimal{ + value: dValue, + exp: int32(exp), + }, nil +} + +// RequireFromString returns a new Decimal from a string representation +// or panics if NewFromString would have returned an error. +// +// Example: +// +// d := RequireFromString("-123.45") +// d2 := RequireFromString(".0001") +// +func RequireFromString(value string) Decimal { + dec, err := NewFromString(value) + if err != nil { + panic(err) + } + return dec +} + +// NewFromFloat converts a float64 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. +// This is typically 15 digits, but may be more in some cases. +// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. +// +// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. +// +// NOTE: this will panic on NaN, +/-inf +func NewFromFloat(value float64) Decimal { + if value == 0 { + return New(0, 0) + } + return newFromFloat(value, math.Float64bits(value), &float64info) +} + +// NewFromFloat32 converts a float32 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. +// This is typically 6-8 digits depending on the input. +// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. +// +// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. 
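+//
+// For example (an illustrative note, not from the upstream docs):
+// NewFromFloat32(0.1).String() yields "0.1", even though float32 cannot
+// store 0.1 exactly.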
+//
+// NOTE: this will panic on NaN, +/-inf
+func NewFromFloat32(value float32) Decimal {
+ if value == 0 {
+ return New(0, 0)
+ }
+ // XOR is workaround for https://github.com/golang/go/issues/26285
+ a := math.Float32bits(value) ^ 0x80808080
+ return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info)
+}
+
+func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal {
+ if math.IsNaN(val) || math.IsInf(val, 0) {
+ panic(fmt.Sprintf("Cannot create a Decimal from %v", val))
+ }
+ exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
+ mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+ switch exp {
+ case 0:
+ // denormalized
+ exp++
+ default:
+ // add implicit top bit
+ mant |= uint64(1) << flt.mantbits
+ }
+ exp += flt.bias
+
+ var d decimal
+ d.Assign(mant)
+ d.Shift(exp - int(flt.mantbits))
+ d.neg = bits>>(flt.expbits+flt.mantbits) != 0
+
+ roundShortest(&d, mant, exp, flt)
+ // If less than 19 digits, we can do calculation in an int64.
+ if d.nd < 19 {
+ tmp := int64(0)
+ m := int64(1)
+ for i := d.nd - 1; i >= 0; i-- {
+ tmp += m * int64(d.d[i]-'0')
+ m *= 10
+ }
+ if d.neg {
+ tmp *= -1
+ }
+ return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)}
+ }
+ dValue := new(big.Int)
+ dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10)
+ if ok {
+ return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)}
+ }
+
+ return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd))
+}
+
+// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary
+// number of fractional digits.
+//
+// Example:
+//
+// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46"
+//
+func NewFromFloatWithExponent(value float64, exp int32) Decimal {
+ if math.IsNaN(value) || math.IsInf(value, 0) {
+ panic(fmt.Sprintf("Cannot create a Decimal from %v", value))
+ }
+
+ bits := math.Float64bits(value)
+ mant := bits & (1<<52 - 1)
+ exp2 := int32((bits >> 52) & (1<<11 - 1))
+ sign := bits >> 63
+
+ if exp2 == 0 {
+ // specials
+ if mant == 0 {
+ return Decimal{}
+ }
+ // subnormal
+ exp2++
+ } else {
+ // normal
+ mant |= 1 << 52
+ }
+
+ exp2 -= 1023 + 52
+
+ // normalizing base-2 values
+ for mant&1 == 0 {
+ mant = mant >> 1
+ exp2++
+ }
+
+ // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0
+ if exp < 0 && exp < exp2 {
+ if exp2 < 0 {
+ exp = exp2
+ } else {
+ exp = 0
+ }
+ }
+
+ // representing 10^M * 2^N as 5^M * 2^(M+N)
+ exp2 -= exp
+
+ temp := big.NewInt(1)
+ dMant := big.NewInt(int64(mant))
+
+ // applying 5^M
+ if exp > 0 {
+ temp = temp.SetInt64(int64(exp))
+ temp = temp.Exp(fiveInt, temp, nil)
+ } else if exp < 0 {
+ temp = temp.SetInt64(-int64(exp))
+ temp = temp.Exp(fiveInt, temp, nil)
+ dMant = dMant.Mul(dMant, temp)
+ temp = temp.SetUint64(1)
+ }
+
+ // applying 2^(M+N)
+ if exp2 > 0 {
+ dMant = dMant.Lsh(dMant, uint(exp2))
+ } else if exp2 < 0 {
+ temp = temp.Lsh(temp, uint(-exp2))
+ }
+
+ // rounding and downscaling
+ if exp > 0 || exp2 < 0 {
+ halfDown := new(big.Int).Rsh(temp, 1)
+ dMant = dMant.Add(dMant, halfDown)
+ dMant = dMant.Quo(dMant, temp)
+ }
+
+ if sign == 1 {
+ dMant = dMant.Neg(dMant)
+ }
+
+ return Decimal{
+ value: dMant,
+ exp: exp,
+ }
+}
+
+// rescale returns a rescaled version of the decimal. Returned
+// decimal may be less precise if the given exponent is bigger
+// than the initial exponent of the Decimal.
+// NOTE: this will truncate, NOT round
+//
+// Example:
+//
+// d := New(12345, -4)
+// d2 := d.rescale(-1)
+// d3 := d2.rescale(-4)
+// println(d)
+// println(d2)
+// println(d3)
+//
+// Output:
+//
+// 1.2345
+// 1.2
+// 1.2000
+//
+func (d Decimal) rescale(exp int32) Decimal {
+ d.ensureInitialized()
+
+ if d.exp == exp {
+ return Decimal{
+ new(big.Int).Set(d.value),
+ d.exp,
+ }
+ }
+
+ // NOTE(vadim): must convert exps to float64 before - to prevent overflow
+ diff := math.Abs(float64(exp) - float64(d.exp))
+ value := new(big.Int).Set(d.value)
+
+ expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil)
+ if exp > d.exp {
+ value = value.Quo(value, expScale)
+ } else if exp < d.exp {
+ value = value.Mul(value, expScale)
+ }
+
+ return Decimal{
+ value: value,
+ exp: exp,
+ }
+}
+
+// Abs returns the absolute value of the decimal.
+func (d Decimal) Abs() Decimal {
+ d.ensureInitialized()
+ d2Value := new(big.Int).Abs(d.value)
+ return Decimal{
+ value: d2Value,
+ exp: d.exp,
+ }
+}
+
+// Add returns d + d2.
+func (d Decimal) Add(d2 Decimal) Decimal {
+ rd, rd2 := RescalePair(d, d2)
+
+ d3Value := new(big.Int).Add(rd.value, rd2.value)
+ return Decimal{
+ value: d3Value,
+ exp: rd.exp,
+ }
+}
+
+// Sub returns d - d2.
+func (d Decimal) Sub(d2 Decimal) Decimal {
+ rd, rd2 := RescalePair(d, d2)
+
+ d3Value := new(big.Int).Sub(rd.value, rd2.value)
+ return Decimal{
+ value: d3Value,
+ exp: rd.exp,
+ }
+}
+
+// Neg returns -d.
+func (d Decimal) Neg() Decimal {
+ d.ensureInitialized()
+ val := new(big.Int).Neg(d.value)
+ return Decimal{
+ value: val,
+ exp: d.exp,
+ }
+}
+
+// Mul returns d * d2.
+func (d Decimal) Mul(d2 Decimal) Decimal {
+ d.ensureInitialized()
+ d2.ensureInitialized()
+
+ expInt64 := int64(d.exp) + int64(d2.exp)
+ if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 {
+ // NOTE(vadim): better to panic than give incorrect results, as
+ // Decimals are usually used for money
+ panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64))
+ }
+
+ d3Value := new(big.Int).Mul(d.value, d2.value)
+ return Decimal{
+ value: d3Value,
+ exp: int32(expInt64),
+ }
+}
+
+// Shift shifts the decimal in base 10.
+// It shifts left when shift is positive and right if shift is negative.
+// In simpler terms, the given value for shift is added to the exponent
+// of the decimal.
+func (d Decimal) Shift(shift int32) Decimal {
+ d.ensureInitialized()
+ return Decimal{
+ value: new(big.Int).Set(d.value),
+ exp: d.exp + shift,
+ }
+}
+
+// Div returns d / d2. If it doesn't divide exactly, the result will have
+// DivisionPrecision digits after the decimal point.
+func (d Decimal) Div(d2 Decimal) Decimal {
+ return d.DivRound(d2, int32(DivisionPrecision))
+}
+
+// QuoRem does division with remainder
+// d.QuoRem(d2,precision) returns quotient q and remainder r such that
+// d = d2 * q + r, q an integer multiple of 10^(-precision)
+// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0
+// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0
+// Note that precision<0 is allowed as input.
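+//
+// An illustrative sketch (values assumed, not from the upstream docs):
+//
+// q, r := New(104, -1).QuoRem(New(3, 0), 1)
+// // q is 3.4 and r is 0.2, since 10.4 = 3*3.4 + 0.2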
+func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { + d.ensureInitialized() + d2.ensureInitialized() + if d2.value.Sign() == 0 { + panic("decimal division by 0") + } + scale := -precision + e := int64(d.exp - d2.exp - scale) + if e > math.MaxInt32 || e < math.MinInt32 { + panic("overflow in decimal QuoRem") + } + var aa, bb, expo big.Int + var scalerest int32 + // d = a 10^ea + // d2 = b 10^eb + if e < 0 { + aa = *d.value + expo.SetInt64(-e) + bb.Exp(tenInt, &expo, nil) + bb.Mul(d2.value, &bb) + scalerest = d.exp + // now aa = a + // bb = b 10^(scale + eb - ea) + } else { + expo.SetInt64(e) + aa.Exp(tenInt, &expo, nil) + aa.Mul(d.value, &aa) + bb = *d2.value + scalerest = scale + d2.exp + // now aa = a ^ (ea - eb - scale) + // bb = b + } + var q, r big.Int + q.QuoRem(&aa, &bb, &r) + dq := Decimal{value: &q, exp: scale} + dr := Decimal{value: &r, exp: scalerest} + return dq, dr +} + +// DivRound divides and rounds to a given precision +// i.e. to an integer multiple of 10^(-precision) +// for a positive quotient digit 5 is rounded up, away from 0 +// if the quotient is negative then digit 5 is rounded down, away from 0 +// Note that precision<0 is allowed as input. +func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { + // QuoRem already checks initialization + q, r := d.QuoRem(d2, precision) + // the actual rounding decision is based on comparing r*10^precision and d2/2 + // instead compare 2 r 10 ^precision and d2 + var rv2 big.Int + rv2.Abs(r.value) + rv2.Lsh(&rv2, 1) + // now rv2 = abs(r.value) * 2 + r2 := Decimal{value: &rv2, exp: r.exp + precision} + // r2 is now 2 * r * 10 ^ precision + var c = r2.Cmp(d2.Abs()) + + if c < 0 { + return q + } + + if d.value.Sign()*d2.value.Sign() < 0 { + return q.Sub(New(1, -precision)) + } + + return q.Add(New(1, -precision)) +} + +// Mod returns d % d2. +func (d Decimal) Mod(d2 Decimal) Decimal { + quo := d.Div(d2).Truncate(0) + return d.Sub(d2.Mul(quo)) +} + +// Pow returns d to the power d2 +func (d Decimal) Pow(d2 Decimal) Decimal { + var temp Decimal + if d2.IntPart() == 0 { + return NewFromFloat(1) + } + temp = d.Pow(d2.Div(NewFromFloat(2))) + if d2.IntPart()%2 == 0 { + return temp.Mul(temp) + } + if d2.IntPart() > 0 { + return temp.Mul(temp).Mul(d) + } + return temp.Mul(temp).Div(d) +} + +// Cmp compares the numbers represented by d and d2 and returns: +// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 +// +func (d Decimal) Cmp(d2 Decimal) int { + d.ensureInitialized() + d2.ensureInitialized() + + if d.exp == d2.exp { + return d.value.Cmp(d2.value) + } + + rd, rd2 := RescalePair(d, d2) + + return rd.value.Cmp(rd2.value) +} + +// Equal returns whether the numbers represented by d and d2 are equal. +func (d Decimal) Equal(d2 Decimal) bool { + return d.Cmp(d2) == 0 +} + +// Equals is deprecated, please use Equal method instead +func (d Decimal) Equals(d2 Decimal) bool { + return d.Equal(d2) +} + +// GreaterThan (GT) returns true when d is greater than d2. +func (d Decimal) GreaterThan(d2 Decimal) bool { + return d.Cmp(d2) == 1 +} + +// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2. +func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool { + cmp := d.Cmp(d2) + return cmp == 1 || cmp == 0 +} + +// LessThan (LT) returns true when d is less than d2. +func (d Decimal) LessThan(d2 Decimal) bool { + return d.Cmp(d2) == -1 +} + +// LessThanOrEqual (LTE) returns true when d is less than or equal to d2. 
+func (d Decimal) LessThanOrEqual(d2 Decimal) bool { + cmp := d.Cmp(d2) + return cmp == -1 || cmp == 0 +} + +// Sign returns: +// +// -1 if d < 0 +// 0 if d == 0 +// +1 if d > 0 +// +func (d Decimal) Sign() int { + if d.value == nil { + return 0 + } + return d.value.Sign() +} + +// IsPositive return +// +// true if d > 0 +// false if d == 0 +// false if d < 0 +func (d Decimal) IsPositive() bool { + return d.Sign() == 1 +} + +// IsNegative return +// +// true if d < 0 +// false if d == 0 +// false if d > 0 +func (d Decimal) IsNegative() bool { + return d.Sign() == -1 +} + +// IsZero return +// +// true if d == 0 +// false if d > 0 +// false if d < 0 +func (d Decimal) IsZero() bool { + return d.Sign() == 0 +} + +// Exponent returns the exponent, or scale component of the decimal. +func (d Decimal) Exponent() int32 { + return d.exp +} + +// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent() +func (d Decimal) Coefficient() *big.Int { + d.ensureInitialized() + // we copy the coefficient so that mutating the result does not mutate the + // Decimal. + return big.NewInt(0).Set(d.value) +} + +// IntPart returns the integer component of the decimal. +func (d Decimal) IntPart() int64 { + scaledD := d.rescale(0) + return scaledD.value.Int64() +} + +// BigInt returns integer component of the decimal as a BigInt. +func (d Decimal) BigInt() *big.Int { + scaledD := d.rescale(0) + i := &big.Int{} + i.SetString(scaledD.String(), 10) + return i +} + +// BigFloat returns decimal as BigFloat. +// Be aware that casting decimal to BigFloat might cause a loss of precision. +func (d Decimal) BigFloat() *big.Float { + f := &big.Float{} + f.SetString(d.String()) + return f +} + +// Rat returns a rational number representation of the decimal. +func (d Decimal) Rat() *big.Rat { + d.ensureInitialized() + if d.exp <= 0 { + // NOTE(vadim): must negate after casting to prevent int32 overflow + denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) + return new(big.Rat).SetFrac(d.value, denom) + } + + mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) + num := new(big.Int).Mul(d.value, mul) + return new(big.Rat).SetFrac(num, oneInt) +} + +// Float64 returns the nearest float64 value for d and a bool indicating +// whether f represents d exactly. +// For more details, see the documentation for big.Rat.Float64 +func (d Decimal) Float64() (f float64, exact bool) { + return d.Rat().Float64() +} + +// String returns the string representation of the decimal +// with the fixed point. +// +// Example: +// +// d := New(-12345, -3) +// println(d.String()) +// +// Output: +// +// -12.345 +// +func (d Decimal) String() string { + return d.string(true) +} + +// StringFixed returns a rounded fixed-point string with places digits after +// the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixed(2) // output: "0.00" +// NewFromFloat(0).StringFixed(0) // output: "0" +// NewFromFloat(5.45).StringFixed(0) // output: "5" +// NewFromFloat(5.45).StringFixed(1) // output: "5.5" +// NewFromFloat(5.45).StringFixed(2) // output: "5.45" +// NewFromFloat(5.45).StringFixed(3) // output: "5.450" +// NewFromFloat(545).StringFixed(-1) // output: "550" +// +func (d Decimal) StringFixed(places int32) string { + rounded := d.Round(places) + return rounded.string(false) +} + +// StringFixedBank returns a banker rounded fixed-point string with places digits +// after the decimal point. 
+//
+// Example:
+//
+// NewFromFloat(0).StringFixedBank(2) // output: "0.00"
+// NewFromFloat(0).StringFixedBank(0) // output: "0"
+// NewFromFloat(5.45).StringFixedBank(0) // output: "5"
+// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4"
+// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45"
+// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450"
+// NewFromFloat(545).StringFixedBank(-1) // output: "540"
+//
+func (d Decimal) StringFixedBank(places int32) string {
+ rounded := d.RoundBank(places)
+ return rounded.string(false)
+}
+
+// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For
+// more details see the documentation at function RoundCash.
+func (d Decimal) StringFixedCash(interval uint8) string {
+ rounded := d.RoundCash(interval)
+ return rounded.string(false)
+}
+
+// Round rounds the decimal to places decimal places.
+// If places < 0, it will round the integer part to the nearest 10^(-places).
+//
+// Example:
+//
+// NewFromFloat(5.45).Round(1).String() // output: "5.5"
+// NewFromFloat(545).Round(-1).String() // output: "550"
+//
+func (d Decimal) Round(places int32) Decimal {
+ // truncate to places + 1
+ ret := d.rescale(-places - 1)
+
+ // add sign(d) * 0.5
+ if ret.value.Sign() < 0 {
+ ret.value.Sub(ret.value, fiveInt)
+ } else {
+ ret.value.Add(ret.value, fiveInt)
+ }
+
+ // floor for positive numbers, ceil for negative numbers
+ _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int))
+ ret.exp++
+ if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 {
+ ret.value.Add(ret.value, oneInt)
+ }
+
+ return ret
+}
+
+// RoundBank rounds the decimal to places decimal places.
+// If the final digit to round is equidistant from the nearest two integers,
+// the rounded value is taken as the even number.
+//
+// If places < 0, it will round the integer part to the nearest 10^(-places).
+//
+// Examples:
+//
+// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4"
+// NewFromFloat(545).RoundBank(-1).String() // output: "540"
+// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5"
+// NewFromFloat(546).RoundBank(-1).String() // output: "550"
+// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6"
+// NewFromFloat(555).RoundBank(-1).String() // output: "560"
+//
+func (d Decimal) RoundBank(places int32) Decimal {
+
+ round := d.Round(places)
+ remainder := d.Sub(round).Abs()
+
+ half := New(5, -places-1)
+ if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 {
+ if round.value.Sign() < 0 {
+ round.value.Add(round.value, oneInt)
+ } else {
+ round.value.Sub(round.value, oneInt)
+ }
+ }
+
+ return round
+}
+
+// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific
+// interval. The amount payable for a cash transaction is rounded to the nearest
+// multiple of the minimum currency unit available. The following intervals are
+// available: 5, 10, 25, 50 and 100; any other number throws a panic.
+// 5: 5 cent rounding 3.43 => 3.45
+// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up)
+// 25: 25 cent rounding 3.41 => 3.50
+// 50: 50 cent rounding 3.75 => 4.00
+// 100: 100 cent rounding 3.50 => 4.00
+// For more details: https://en.wikipedia.org/wiki/Cash_rounding
+func (d Decimal) RoundCash(interval uint8) Decimal {
+ var iVal *big.Int
+ switch interval {
+ case 5:
+ iVal = twentyInt
+ case 10:
+ iVal = tenInt
+ case 25:
+ iVal = fourInt
+ case 50:
+ iVal = twoInt
+ case 100:
+ iVal = oneInt
+ default:
+ panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. 
Supported: 5, 10, 25, 50, 100", interval)) + } + dVal := Decimal{ + value: iVal, + } + + // TODO: optimize those calculations to reduce the high allocations (~29 allocs). + return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) +} + +// Floor returns the nearest integer value less than or equal to d. +func (d Decimal) Floor() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z := new(big.Int).Div(d.value, exp) + return Decimal{value: z, exp: 0} +} + +// Ceil returns the nearest integer value greater than or equal to d. +func (d Decimal) Ceil() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) + if m.Cmp(zeroInt) != 0 { + z.Add(z, oneInt) + } + return Decimal{value: z, exp: 0} +} + +// Truncate truncates off digits from the number, without rounding. +// +// NOTE: precision is the last digit that will not be truncated (must be >= 0). +// +// Example: +// +// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" +// +func (d Decimal) Truncate(precision int32) Decimal { + d.ensureInitialized() + if precision >= 0 && -precision > d.exp { + return d.rescale(-precision) + } + return d +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { + if string(decimalBytes) == "null" { + return nil + } + + str, err := unquoteIfQuoted(decimalBytes) + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) + } + + decimal, err := NewFromString(str) + *d = decimal + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Decimal) MarshalJSON() ([]byte, error) { + var str string + if MarshalJSONWithoutQuotes { + str = d.String() + } else { + str = "\"" + d.String() + "\"" + } + return []byte(str), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation +// is already used when encoding to text, this method stores that string as []byte +func (d *Decimal) UnmarshalBinary(data []byte) error { + // Extract the exponent + d.exp = int32(binary.BigEndian.Uint32(data[:4])) + + // Extract the value + d.value = new(big.Int) + return d.value.GobDecode(data[4:]) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Decimal) MarshalBinary() (data []byte, err error) { + // Write the exponent first since it's a fixed size + v1 := make([]byte, 4) + binary.BigEndian.PutUint32(v1, uint32(d.exp)) + + // Add the value + var v2 []byte + if v2, err = d.value.GobEncode(); err != nil { + return + } + + // Return the byte array + data = append(v1, v2...) + return +} + +// Scan implements the sql.Scanner interface for database deserialization. 
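+//
+// A minimal usage sketch (the query and column names are assumed here, not
+// part of the upstream docs):
+//
+// var price Decimal
+// err := db.QueryRow("SELECT price FROM orders WHERE id = 1").Scan(&price)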
+func (d *Decimal) Scan(value interface{}) error { + // first try to see if the data is stored in database as a Numeric datatype + switch v := value.(type) { + + case float32: + *d = NewFromFloat(float64(v)) + return nil + + case float64: + // numeric in sqlite3 sends us float64 + *d = NewFromFloat(v) + return nil + + case int64: + // at least in sqlite3 when the value is 0 in db, the data is sent + // to us as an int64 instead of a float64 ... + *d = New(v, 0) + return nil + + default: + // default is trying to interpret value stored as string + str, err := unquoteIfQuoted(v) + if err != nil { + return err + } + *d, err = NewFromString(str) + return err + } +} + +// Value implements the driver.Valuer interface for database serialization. +func (d Decimal) Value() (driver.Value, error) { + return d.String(), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for XML +// deserialization. +func (d *Decimal) UnmarshalText(text []byte) error { + str := string(text) + + dec, err := NewFromString(str) + *d = dec + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface for XML +// serialization. +func (d Decimal) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +// GobEncode implements the gob.GobEncoder interface for gob serialization. +func (d Decimal) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface for gob serialization. +func (d *Decimal) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// StringScaled first scales the decimal then calls .String() on it. +// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. +func (d Decimal) StringScaled(exp int32) string { + return d.rescale(exp).String() +} + +func (d Decimal) string(trimTrailingZeros bool) string { + if d.exp >= 0 { + return d.rescale(0).value.String() + } + + abs := new(big.Int).Abs(d.value) + str := abs.String() + + var intPart, fractionalPart string + + // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN + // and you are on a 32-bit machine. Won't fix this super-edge case. + dExpInt := int(d.exp) + if len(str) > -dExpInt { + intPart = str[:len(str)+dExpInt] + fractionalPart = str[len(str)+dExpInt:] + } else { + intPart = "0" + + num0s := -dExpInt - len(str) + fractionalPart = strings.Repeat("0", num0s) + str + } + + if trimTrailingZeros { + i := len(fractionalPart) - 1 + for ; i >= 0; i-- { + if fractionalPart[i] != '0' { + break + } + } + fractionalPart = fractionalPart[:i+1] + } + + number := intPart + if len(fractionalPart) > 0 { + number += "." + fractionalPart + } + + if d.value.Sign() < 0 { + return "-" + number + } + + return number +} + +func (d *Decimal) ensureInitialized() { + if d.value == nil { + d.value = new(big.Int) + } +} + +// Min returns the smallest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Min(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Min with 0 arguments. +func Min(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) < 0 { + ans = item + } + } + return ans +} + +// Max returns the largest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Max(arr[0], arr[1:]...) 
+// +// This makes it harder to accidentally call Max with 0 arguments. +func Max(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) > 0 { + ans = item + } + } + return ans +} + +// Sum returns the combined total of the provided first and rest Decimals +func Sum(first Decimal, rest ...Decimal) Decimal { + total := first + for _, item := range rest { + total = total.Add(item) + } + + return total +} + +// Avg returns the average value of the provided first and rest Decimals +func Avg(first Decimal, rest ...Decimal) Decimal { + count := New(int64(len(rest)+1), 0) + sum := Sum(first, rest...) + return sum.Div(count) +} + +// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) +func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { + d1.ensureInitialized() + d2.ensureInitialized() + + if d1.exp == d2.exp { + return d1, d2 + } + + baseScale := min(d1.exp, d2.exp) + if baseScale != d1.exp { + return d1.rescale(baseScale), d2 + } + return d1, d2.rescale(baseScale) +} + +func min(x, y int32) int32 { + if x >= y { + return y + } + return x +} + +func unquoteIfQuoted(value interface{}) (string, error) { + var bytes []byte + + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", + value, value) + } + + // If the amount is quoted, strip the quotes + if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { + bytes = bytes[1 : len(bytes)-1] + } + return string(bytes), nil +} + +// NullDecimal represents a nullable decimal with compatibility for +// scanning null values from the database. +type NullDecimal struct { + Decimal Decimal + Valid bool +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *NullDecimal) Scan(value interface{}) error { + if value == nil { + d.Valid = false + return nil + } + d.Valid = true + return d.Decimal.Scan(value) +} + +// Value implements the driver.Valuer interface for database serialization. +func (d NullDecimal) Value() (driver.Value, error) { + if !d.Valid { + return nil, nil + } + return d.Decimal.Value() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error { + if string(decimalBytes) == "null" { + d.Valid = false + return nil + } + d.Valid = true + return d.Decimal.UnmarshalJSON(decimalBytes) +} + +// MarshalJSON implements the json.Marshaler interface. +func (d NullDecimal) MarshalJSON() ([]byte, error) { + if !d.Valid { + return []byte("null"), nil + } + return d.Decimal.MarshalJSON() +} + +// Trig functions + +// Atan returns the arctangent, in radians, of x. 
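+//
+// For example (an illustrative sketch, not from the upstream docs):
+//
+// NewFromFloat(1.0).Atan() // approximately 0.785398163397448 (pi/4)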
+func (d Decimal) Atan() Decimal { + if d.Equal(NewFromFloat(0.0)) { + return d + } + if d.GreaterThan(NewFromFloat(0.0)) { + return d.satan() + } + return d.Neg().satan().Neg() +} + +func (d Decimal) xatan() Decimal { + P0 := NewFromFloat(-8.750608600031904122785e-01) + P1 := NewFromFloat(-1.615753718733365076637e+01) + P2 := NewFromFloat(-7.500855792314704667340e+01) + P3 := NewFromFloat(-1.228866684490136173410e+02) + P4 := NewFromFloat(-6.485021904942025371773e+01) + Q0 := NewFromFloat(2.485846490142306297962e+01) + Q1 := NewFromFloat(1.650270098316988542046e+02) + Q2 := NewFromFloat(4.328810604912902668951e+02) + Q3 := NewFromFloat(4.853903996359136964868e+02) + Q4 := NewFromFloat(1.945506571482613964425e+02) + z := d.Mul(d) + b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z) + b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4) + z = b1.Div(b2) + z = d.Mul(z).Add(d) + return z +} + +// satan reduces its argument (known to be positive) +// to the range [0, 0.66] and calls xatan. +func (d Decimal) satan() Decimal { + Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits + Tan3pio8 := NewFromFloat(2.41421356237309504880) // tan(3*pi/8) + pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459) + + if d.LessThanOrEqual(NewFromFloat(0.66)) { + return d.xatan() + } + if d.GreaterThan(Tan3pio8) { + return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits) + } + return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits)) +} + +// sin coefficients +var _sin = [...]Decimal{ + NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd + NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d + NewFromFloat(2.75573136213857245213e-6), // 0x3ec71de3567d48a1 + NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03 + NewFromFloat(8.33333333332211858878e-3), // 0x3f8111111110f7d0 + NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548 +} + +// Sin returns the sine of the radian argument x. 
+func (d Decimal) Sin() Decimal { + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } else { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } + if sign { + y = y.Neg() + } + return y +} + +// cos coefficients +var _cos = [...]Decimal{ + NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b + NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 + NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 + NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 + NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 + NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b +} + +// Cos returns the cosine of the radian argument x. 
+func (d Decimal) Cos() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + // make argument positive + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + if j > 1 { + sign = !sign + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } else { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } + if sign { + y = y.Neg() + } + return y +} + +var _tanP = [...]Decimal{ + NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 + NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd + NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 +} +var _tanQ = [...]Decimal{ + NewFromFloat(1.00000000000000000000e+0), + NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 + NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 + NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef + NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 +} + +// Tan returns the tangent of the radian argument x. 
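Sin and Cos above, and the Tan implementation below, all rely on the same reduction: the integer multiple of Pi/4 is subtracted in three parts (PI4A, PI4B, PI4C) so that each subtraction loses almost no precision. A minimal float64 sketch of that idea (a hypothetical helper, not code from this diff):

```go
package main

import "fmt"

// reducePi4 returns j = int(x * 4/Pi) and the remainder of x modulo Pi/4.
// Subtracting y*Pi/4 in three progressively smaller pieces mimics the
// "extended precision modular arithmetic" used by Sin, Cos and Tan.
func reducePi4(x float64) (int64, float64) {
	const (
		PI4A = 7.85398125648498535156e-1  // high bits of Pi/4
		PI4B = 3.77489470793079817668e-8  // middle bits
		PI4C = 2.69515142907905952645e-15 // low bits
		M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/Pi
	)
	j := int64(x * M4PI)
	y := float64(j)
	return j, ((x - y*PI4A) - y*PI4B) - y*PI4C
}

func main() {
	j, z := reducePi4(100.0)
	fmt.Println(j, z) // octant count and a remainder in [0, Pi/4)
}
```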
+func (d Decimal) Tan() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if zz.GreaterThan(NewFromFloat(1e-14)) { + w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) + x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) + y = z.Add(z.Mul(w.Div(x))) + } else { + y = z + } + if j&2 == 2 { + y = NewFromFloat(-1.0).Div(y) + } + if sign { + y = y.Neg() + } + return y +} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go new file mode 100644 index 000000000..8008f55cb --- /dev/null +++ b/vendor/github.com/shopspring/decimal/rounding.go @@ -0,0 +1,119 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package decimal + +type floatInfo struct { + mantbits uint + expbits uint + bias int +} + +var float32info = floatInfo{23, 8, -127} +var float64info = floatInfo{52, 11, -1023} + +// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely reconstructed. +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. + // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. 
+	upper := new(decimal)
+	upper.Assign(mant*2 + 1)
+	upper.Shift(exp - int(flt.mantbits) - 1)
+
+	// d = mant << (exp - mantbits)
+	// Next lowest floating point number is mant-1 << exp-mantbits,
+	// unless mant-1 drops the significant bit and exp is not the minimum exp,
+	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
+	// Either way, call it mantlo << explo-mantbits.
+	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+	var mantlo uint64
+	var explo int
+	if mant > 1<<flt.mantbits || exp == minexp {
+		mantlo = mant - 1
+		explo = exp
+	} else {
+		mantlo = mant*2 - 1
+		explo = exp - 1
+	}
+	lower := new(decimal)
+	lower.Assign(mantlo*2 + 1)
+	lower.Shift(explo - int(flt.mantbits) - 1)
+
+	// The upper and lower bounds are possible outputs only if
+	// the original mantissa is even, so that IEEE round-to-even
+	// would round to the original mantissa and not the neighbors.
+	inclusive := mant%2 == 0
+
+	// Now we can figure out the minimum number of digits required.
+	// Walk along until d has distinguished itself from upper and lower.
+	for i := 0; i < d.nd; i++ {
+		l := byte('0') // lower digit
+		if i < lower.nd {
+			l = lower.d[i]
+		}
+		m := d.d[i]    // middle digit
+		u := byte('0') // upper digit
+		if i < upper.nd {
+			u = upper.d[i]
+		}
+
+		// Okay to round down (truncate) if lower has a different digit
+		// or if lower is inclusive and is exactly the result of rounding
+		// down (i.e., and we have reached the final digit of lower).
+		okdown := l != m || inclusive && i+1 == lower.nd
+
+		// Okay to round up if upper has a different digit and either upper
+		// is inclusive or upper is bigger than the result of rounding up.
+		okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
+
+		// If it's okay to do either, then round to the nearest one.
+		// If it's okay to do only one, do it.
+		switch {
+		case okdown && okup:
+			d.Round(i + 1)
+			return
+		case okdown:
+			d.RoundDown(i + 1)
+			return
+		case okup:
+			d.RoundUp(i + 1)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+	v, _ := ToBoolE(i)
+	return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+	v, _ := ToTimeE(i)
+	return v
+}
+
+// ToTimeInDefaultLocation casts an interface to a time.Time type,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time {
+	v, _ := ToTimeInDefaultLocationE(i, location)
+	return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+	v, _ := ToDurationE(i)
+	return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+	v, _ := ToFloat64E(i)
+	return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+	v, _ := ToFloat32E(i)
+	return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+	v, _ := ToInt64E(i)
+	return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+	v, _ := ToInt32E(i)
+	return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+	v, _ := ToInt16E(i)
+	return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+	v, _ := ToInt8E(i)
+	return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+	v, _ := ToIntE(i)
+	return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+	v, _ := ToUintE(i)
+	return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+	v, _ := ToUint64E(i)
+	return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+	v, _ := ToUint32E(i)
+	return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+	v, _ := ToUint16E(i)
+	return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+	v, _ := ToUint8E(i)
+	return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+	v, _ := ToStringE(i)
+	return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+	v, _ := ToStringMapStringE(i)
+	return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+	v, _ := ToStringMapStringSliceE(i)
+	return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+	v, _ := ToStringMapBoolE(i)
+	return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
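All of the helpers in cast.go follow the pattern visible above: each ToX wrapper discards the error from its ToXE counterpart, so failures silently yield the zero value (ToStringMapInt, announced by the comment above, is defined right below). A small sketch against the package's public API:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The E variant reports failures...
	n, err := cast.ToIntE("not a number")
	fmt.Println(n, err) // 0 unable to cast "not a number" of type string to int

	// ...while the plain variant swallows them and yields the zero value.
	fmt.Println(cast.ToInt("not a number")) // 0
	fmt.Println(cast.ToInt("42"))           // 42
}
```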
+func ToStringMapInt(i interface{}) map[string]int {
+	v, _ := ToStringMapIntE(i)
+	return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+	v, _ := ToStringMapInt64E(i)
+	return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+	v, _ := ToStringMapE(i)
+	return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+	v, _ := ToSliceE(i)
+	return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+	v, _ := ToBoolSliceE(i)
+	return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+	v, _ := ToStringSliceE(i)
+	return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+	v, _ := ToIntSliceE(i)
+	return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+	v, _ := ToDurationSliceE(i)
+	return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..c04af6a97
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1337 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"html/template"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+	return ToTimeInDefaultLocationE(i, time.UTC)
+}
+
+// ToTimeInDefaultLocationE casts an empty interface to time.Time,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) {
+	i = indirect(i)
+
+	switch v := i.(type) {
+	case time.Time:
+		return v, nil
+	case string:
+		return StringToDateInDefaultLocation(v, location)
+	case int:
+		return time.Unix(int64(v), 0), nil
+	case int64:
+		return time.Unix(v, 0), nil
+	case int32:
+		return time.Unix(int64(v), 0), nil
+	case uint:
+		return time.Unix(int64(v), 0), nil
+	case uint64:
+		return time.Unix(int64(v), 0), nil
+	case uint32:
+		return time.Unix(int64(v), 0), nil
+	default:
+		return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+	}
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case time.Duration:
+		return s, nil
+	case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+		d = time.Duration(ToInt64(s))
+		return
+	case float32, float64:
+		d = time.Duration(ToFloat64(s))
+		return
+	case string:
+		if strings.ContainsAny(s, "nsuµmh") {
+			d, err = time.ParseDuration(s)
+		} else {
+			d, err = time.ParseDuration(s + "ns")
+		}
+		return
+	default:
+		err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+		return
+	}
+}
+
+// ToBoolE casts an interface to a bool type.
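One subtlety of ToDurationE above: a string with no unit rune from "nsuµmh" gets "ns" appended before parsing, so bare numbers mean nanoseconds. A short sketch (ToBoolE, introduced by the comment above, follows it):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// "100" carries no unit, so it is parsed as "100ns".
	fmt.Println(cast.ToDuration("100"))  // 100ns
	fmt.Println(cast.ToDuration("1.5h")) // 1h30m0s

	// Integer inputs are taken as raw nanosecond counts.
	fmt.Println(cast.ToDuration(250)) // 250ns
}
```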
+func ToBoolE(i interface{}) (bool, error) { + i = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + if i.(int) != 0 { + return true, nil + } + return false, nil + case string: + return strconv.ParseBool(i.(string)) + default: + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) + } +} + +// ToFloat64E casts an interface to a float64 type. +func ToFloat64E(i interface{}) (float64, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return s, nil + case float32: + return float64(s), nil + case int: + return float64(s), nil + case int64: + return float64(s), nil + case int32: + return float64(s), nil + case int16: + return float64(s), nil + case int8: + return float64(s), nil + case uint: + return float64(s), nil + case uint64: + return float64(s), nil + case uint32: + return float64(s), nil + case uint16: + return float64(s), nil + case uint8: + return float64(s), nil + case string: + v, err := strconv.ParseFloat(s, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + } +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i interface{}) (float32, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return float32(s), nil + case float32: + return s, nil + case int: + return float32(s), nil + case int64: + return float32(s), nil + case int32: + return float32(s), nil + case int16: + return float32(s), nil + case int8: + return float32(s), nil + case uint: + return float32(s), nil + case uint64: + return float32(s), nil + case uint32: + return float32(s), nil + case uint16: + return float32(s), nil + case uint8: + return float32(s), nil + case string: + v, err := strconv.ParseFloat(s, 32) + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + } +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i interface{}) (int64, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int64(s), nil + case int64: + return s, nil + case int32: + return int64(s), nil + case int16: + return int64(s), nil + case int8: + return int64(s), nil + case uint: + return int64(s), nil + case uint64: + return int64(s), nil + case uint32: + return int64(s), nil + case uint16: + return int64(s), nil + case uint8: + return int64(s), nil + case float64: + return int64(s), nil + case float32: + return int64(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + } +} + +// ToInt32E casts an interface to an int32 type. 
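The integer casts above parse strings with strconv.ParseInt(s, 0, 0), so base prefixes are honored, and float inputs are truncated toward zero rather than rounded. For instance (ToInt32E, announced by the comment above, follows the sketch):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	fmt.Println(cast.ToInt64("0x10")) // 16: base 0 accepts hex and octal prefixes
	fmt.Println(cast.ToInt64("-7"))   // -7
	fmt.Println(cast.ToInt64(3.9))    // 3: floats are truncated, not rounded
}
```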
+func ToInt32E(i interface{}) (int32, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int32(s), nil + case int64: + return int32(s), nil + case int32: + return s, nil + case int16: + return int32(s), nil + case int8: + return int32(s), nil + case uint: + return int32(s), nil + case uint64: + return int32(s), nil + case uint32: + return int32(s), nil + case uint16: + return int32(s), nil + case uint8: + return int32(s), nil + case float64: + return int32(s), nil + case float32: + return int32(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + } +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i interface{}) (int16, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int16(s), nil + case int64: + return int16(s), nil + case int32: + return int16(s), nil + case int16: + return s, nil + case int8: + return int16(s), nil + case uint: + return int16(s), nil + case uint64: + return int16(s), nil + case uint32: + return int16(s), nil + case uint16: + return int16(s), nil + case uint8: + return int16(s), nil + case float64: + return int16(s), nil + case float32: + return int16(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + } +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i interface{}) (int8, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int8(s), nil + case int64: + return int8(s), nil + case int32: + return int8(s), nil + case int16: + return int8(s), nil + case int8: + return s, nil + case uint: + return int8(s), nil + case uint64: + return int8(s), nil + case uint32: + return int8(s), nil + case uint16: + return int8(s), nil + case uint8: + return int8(s), nil + case float64: + return int8(s), nil + case float32: + return int8(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + } +} + +// ToIntE casts an interface to an int type. 
+func ToIntE(i interface{}) (int, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return s, nil + case int64: + return int(s), nil + case int32: + return int(s), nil + case int16: + return int(s), nil + case int8: + return int(s), nil + case uint: + return int(s), nil + case uint64: + return int(s), nil + case uint32: + return int(s), nil + case uint16: + return int(s), nil + case uint8: + return int(s), nil + case float64: + return int(s), nil + case float32: + return int(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + } +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i interface{}) (uint, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 0) + if err == nil { + return uint(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case uint: + return s, nil + case uint64: + return uint(s), nil + case uint32: + return uint(s), nil + case uint16: + return uint(s), nil + case uint8: + return uint(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + } +} + +// ToUint64E casts an interface to a uint64 type. +func ToUint64E(i interface{}) (uint64, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case uint: + return uint64(s), nil + case uint64: + return s, nil + case uint32: + return uint64(s), nil + case uint16: + return uint64(s), nil + case uint8: + return uint64(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + } +} + +// ToUint32E casts an interface to a uint32 type. 
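The unsigned casts above and below all share the errNegativeNotAllowed guard: signed and float inputs below zero are rejected instead of being wrapped around. A quick illustration (ToUint32E, introduced by the comment above, follows):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Negative inputs produce an error rather than a huge wrapped value.
	v, err := cast.ToUint64E(-1)
	fmt.Println(v, err) // 0 unable to cast negative value

	fmt.Println(cast.ToUint64E(uint32(7))) // 7 <nil>
}
```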
+func ToUint32E(i interface{}) (uint32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 32)
+		if err == nil {
+			return uint32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case uint:
+		return uint32(s), nil
+	case uint64:
+		return uint32(s), nil
+	case uint32:
+		return s, nil
+	case uint16:
+		return uint32(s), nil
+	case uint8:
+		return uint32(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+	}
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 16)
+		if err == nil {
+			return uint16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case uint:
+		return uint16(s), nil
+	case uint64:
+		return uint16(s), nil
+	case uint32:
+		return uint16(s), nil
+	case uint16:
+		return s, nil
+	case uint8:
+		return uint16(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+	}
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 8) + if err == nil { + return uint8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case uint: + return uint8(s), nil + case uint64: + return uint8(s), nil + case uint32: + return uint8(s), nil + case uint16: + return uint8(s), nil + case uint8: + return s, nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + } +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(a interface{}) interface{} { + if a == nil { + return nil + } + if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return a + } + v := reflect.ValueOf(a) + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirectToStringerOrError returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer +// or error, +func indirectToStringerOrError(a interface{}) interface{} { + if a == nil { + return nil + } + + var errorType = reflect.TypeOf((*error)(nil)).Elem() + var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + + v := reflect.ValueOf(a) + for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// ToStringE casts an interface to a string type. 
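The indirect helpers above explain a convenient property of the whole package: pointer inputs are dereferenced automatically, and for string conversion a fmt.Stringer or error implementation wins over raw formatting. A small sketch (the ToStringE implementation announced above follows):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

type host struct{ name string }

func (h host) String() string { return h.name }

func main() {
	// Pointers are dereferenced down to the base type.
	n := 42
	fmt.Println(cast.ToString(&n)) // "42"

	// fmt.Stringer implementations are honored before reflection kicks in.
	fmt.Println(cast.ToString(host{"db"}))  // "db"
	fmt.Println(cast.ToString([]byte("x"))) // "x"
}
```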
+func ToStringE(i interface{}) (string, error) { + i = indirectToStringerOrError(i) + + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int64: + return strconv.FormatInt(s, 10), nil + case int32: + return strconv.Itoa(int(s)), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) + } +} + +// ToStringMapStringE casts an interface to a map[string]string type. +func ToStringMapStringE(i interface{}) (map[string]string, error) { + var m = map[string]string{} + + switch v := i.(type) { + case map[string]string: + return v, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) + } +} + +// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
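ToStringMapStringE above also accepts a plain string, which it treats as a JSON object; map[interface{}]interface{} inputs (as produced by YAML decoders) are normalized key by key. For example (ToStringMapStringSliceE, introduced by the comment above, follows the sketch):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// YAML-style maps with interface{} keys are flattened to string keys.
	in := map[interface{}]interface{}{"a": 1, "b": true}
	fmt.Println(cast.ToStringMapString(in)) // map[a:1 b:true]

	// A string input is unmarshalled as a JSON object.
	m, err := cast.ToStringMapStringE(`{"host": "db", "port": "5432"}`)
	fmt.Println(m, err) // map[host:db port:5432] <nil>
}
```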
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+	var m = map[string][]string{}
+
+	switch v := i.(type) {
+	case map[string][]string:
+		return v, nil
+	case map[string][]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[string]string:
+		for k, val := range v {
+			m[ToString(k)] = []string{val}
+		}
+	case map[string]interface{}:
+		for k, val := range v {
+			switch vt := val.(type) {
+			case []interface{}:
+				m[ToString(k)] = ToStringSlice(vt)
+			case []string:
+				m[ToString(k)] = vt
+			default:
+				m[ToString(k)] = []string{ToString(val)}
+			}
+		}
+		return m, nil
+	case map[interface{}][]string:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}]string:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}][]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			key, err := ToStringE(k)
+			if err != nil {
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+			}
+			value, err := ToStringSliceE(val)
+			if err != nil {
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+			}
+			m[key] = value
+		}
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+	}
+	return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+	var m = map[string]bool{}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToBool(val)
+		}
+		return m, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToBool(val)
+		}
+		return m, nil
+	case map[string]bool:
+		return v, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+	}
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+	var m = map[string]interface{}{}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = val
+		}
+		return m, nil
+	case map[string]interface{}:
+		return v, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+	}
+}
+
+// ToStringMapIntE casts an interface to a map[string]int type.
+func ToStringMapIntE(i interface{}) (map[string]int, error) {
+	var m = map[string]int{}
+	if i == nil {
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+	}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToInt(val)
+		}
+		return m, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[k] = ToInt(val)
+		}
+		return m, nil
+	case map[string]int:
+		return v, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	}
+
+	if reflect.TypeOf(i).Kind() != reflect.Map {
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+	}
+
+	mVal := reflect.ValueOf(m)
+	v := reflect.ValueOf(i)
+	for _, keyVal := range v.MapKeys() {
+		val, err := ToIntE(v.MapIndex(keyVal).Interface())
+		if err != nil {
+			return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+		}
+		mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+	}
+	return m, nil
+}
+
+// ToStringMapInt64E casts an interface to a map[string]int64 type.
+func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
+	var m = map[string]int64{}
+	if i == nil {
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+	}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToInt64(val)
+		}
+		return m, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[k] = ToInt64(val)
+		}
+		return m, nil
+	case map[string]int64:
+		return v, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	}
+
+	if reflect.TypeOf(i).Kind() != reflect.Map {
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+	}
+	mVal := reflect.ValueOf(m)
+	v := reflect.ValueOf(i)
+	for _, keyVal := range v.MapKeys() {
+		val, err := ToInt64E(v.MapIndex(keyVal).Interface())
+		if err != nil {
+			return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+		}
+		mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+	}
+	return m, nil
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+	var s []interface{}
+
+	switch v := i.(type) {
+	case []interface{}:
+		return append(s, v...), nil
+	case []map[string]interface{}:
+		for _, u := range v {
+			s = append(s, u)
+		}
+		return s, nil
+	default:
+		return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+	}
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+	if i == nil {
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+	}
+
+	switch v := i.(type) {
+	case []bool:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]bool, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToBoolE(s.Index(j).Interface())
+			if err != nil {
+				return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+	}
+}
+
+// ToStringSliceE casts an interface to a []string type.
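ToBoolSliceE above shows the slice pattern the remaining casts reuse: any slice or array kind is walked through reflection, converting element by element with the scalar ToXE casts. For example (ToStringSliceE, announced above, follows the sketch):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Each element goes through ToBoolE / ToIntE respectively.
	fmt.Println(cast.ToBoolSlice([]string{"true", "0", "1"})) // [true false true]
	fmt.Println(cast.ToIntSlice([]interface{}{1, "2", 3.0}))  // [1 2 3]
}
```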
+func ToStringSliceE(i interface{}) ([]string, error) { + var a []string + + switch v := i.(type) { + case []interface{}: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []string: + return v, nil + case []int8: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case string: + return strings.Fields(v), nil + case []error: + for _, err := range i.([]error) { + a = append(a, err.Error()) + } + return a, nil + case interface{}: + str, err := ToStringE(v) + if err != nil { + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } + return []string{str}, nil + default: + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } +} + +// ToIntSliceE casts an interface to a []int type. +func ToIntSliceE(i interface{}) ([]int, error) { + if i == nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + + switch v := i.(type) { + case []int: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]int, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToIntE(s.Index(j).Interface()) + if err != nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + a[j] = val + } + return a, nil + default: + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } +} + +// ToDurationSliceE casts an interface to a []time.Duration type. +func ToDurationSliceE(i interface{}) ([]time.Duration, error) { + if i == nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + + switch v := i.(type) { + case []time.Duration: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]time.Duration, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToDurationE(s.Index(j).Interface()) + if err != nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + a[j] = val + } + return a, nil + default: + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } +} + +// StringToDate attempts to parse a string into a time.Time type using a +// predefined list of formats. If no suitable format is found, an error is +// returned. +func StringToDate(s string) (time.Time, error) { + return parseDateWith(s, time.UTC, timeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. 
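StringToDate and its location-aware variant below try a fixed list of layouts in order until one parses. A brief sketch of the resulting behavior (time.LoadLocation assumes tzdata is available on the host):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// The format list is tried in order; this matches "2006-01-02 15:04:05".
	t := cast.ToTime("2006-01-02 15:04:05")
	fmt.Println(t.UTC())

	// Inputs without a timezone can be pinned to a specific location.
	loc, _ := time.LoadLocation("Europe/Berlin")
	fmt.Println(cast.ToTimeInDefaultLocation("2006-01-02", loc))
}
```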
+func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) {
+	return parseDateWith(s, location, timeFormats)
+}
+
+type timeFormatType int
+
+const (
+	timeFormatNoTimezone timeFormatType = iota
+	timeFormatNamedTimezone
+	timeFormatNumericTimezone
+	timeFormatNumericAndNamedTimezone
+	timeFormatTimeOnly
+)
+
+type timeFormat struct {
+	format string
+	typ    timeFormatType
+}
+
+func (f timeFormat) hasTimezone() bool {
+	// We don't include the formats with only named timezones, see
+	// https://github.com/golang/go/issues/19694#issuecomment-289103522
+	return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone
+}
+
+var (
+	timeFormats = []timeFormat{
+		timeFormat{time.RFC3339, timeFormatNumericTimezone},
+		timeFormat{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
+		timeFormat{time.RFC1123Z, timeFormatNumericTimezone},
+		timeFormat{time.RFC1123, timeFormatNamedTimezone},
+		timeFormat{time.RFC822Z, timeFormatNumericTimezone},
+		timeFormat{time.RFC822, timeFormatNamedTimezone},
+		timeFormat{time.RFC850, timeFormatNamedTimezone},
+		timeFormat{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
+		timeFormat{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
+		timeFormat{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
+		timeFormat{"2006-01-02 15:04:05", timeFormatNoTimezone},
+		timeFormat{time.ANSIC, timeFormatNoTimezone},
+		timeFormat{time.UnixDate, timeFormatNamedTimezone},
+		timeFormat{time.RubyDate, timeFormatNumericTimezone},
+		timeFormat{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
+		timeFormat{"2006-01-02", timeFormatNoTimezone},
+		timeFormat{"02 Jan 2006", timeFormatNoTimezone},
+		timeFormat{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
+		timeFormat{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
+		timeFormat{time.Kitchen, timeFormatTimeOnly},
+		timeFormat{time.Stamp, timeFormatTimeOnly},
+		timeFormat{time.StampMilli, timeFormatTimeOnly},
+		timeFormat{time.StampMicro, timeFormatTimeOnly},
+		timeFormat{time.StampNano, timeFormatTimeOnly},
+	}
+)
+
+func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) {
+
+	for _, format := range formats {
+		if d, e = time.Parse(format.format, s); e == nil {
+
+			// Some time formats have a zone name, but no offset, so it gets
+			// put in that zone name (not the default one passed in to us), but
+			// without that zone's offset. So set the location manually.
+			if format.typ <= timeFormatNamedTimezone {
+				if location == nil {
+					location = time.Local
+				}
+				year, month, day := d.Date()
+				hour, min, sec := d.Clock()
+				d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location)
+			}
+
+			return
+		}
+	}
+	return d, fmt.Errorf("unable to parse date: %s", s)
+}
+
+// jsonStringToObject attempts to unmarshal a string as JSON into
+// the object passed as pointer.
+func jsonStringToObject(s string, v interface{}) error {
+	data := []byte(s)
+	return json.Unmarshal(data, v)
+}
diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go
new file mode 100644
index 000000000..1524fc82c
--- /dev/null
+++ b/vendor/github.com/spf13/cast/timeformattype_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type timeFormatType"; DO NOT EDIT.
+ +package cast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[timeFormatNoTimezone-0] + _ = x[timeFormatNamedTimezone-1] + _ = x[timeFormatNumericTimezone-2] + _ = x[timeFormatNumericAndNamedTimezone-3] + _ = x[timeFormatTimeOnly-4] +} + +const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" + +var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i timeFormatType) String() string { + if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { + return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] +} diff --git a/vendor/github.com/valyala/bytebufferpool/.travis.yml b/vendor/github.com/valyala/bytebufferpool/.travis.yml new file mode 100644 index 000000000..6a6ec2eb0 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.6 + +script: + # build test for supported platforms + - GOOS=linux go build + - GOOS=darwin go build + - GOOS=freebsd go build + - GOOS=windows go build + - GOARCH=386 go build + + # run tests on a standard platform + - go test -v ./... diff --git a/vendor/github.com/valyala/bytebufferpool/LICENSE b/vendor/github.com/valyala/bytebufferpool/LICENSE new file mode 100644 index 000000000..f7c935c20 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/valyala/bytebufferpool/README.md b/vendor/github.com/valyala/bytebufferpool/README.md new file mode 100644 index 000000000..061357e83 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/README.md @@ -0,0 +1,21 @@ +[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool) +[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool) +[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool) + +# bytebufferpool + +An implementation of a pool of byte buffers with anti-memory-waste protection. 
+
+The pool may waste a limited amount of memory due to fragmentation.
+This amount equals the maximum total size of the byte buffers
+in concurrent use.
+
+# Benchmark results
+Currently bytebufferpool is the fastest and most effective buffer pool written in Go.
+
+You can find results [here](https://omgnull.github.io/go-benchmark/buffer/).
+
+# bytebufferpool users
+
+* [fasthttp](https://github.com/valyala/fasthttp)
+* [quicktemplate](https://github.com/valyala/quicktemplate)
diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
new file mode 100644
index 000000000..07a055a2d
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
@@ -0,0 +1,111 @@
+package bytebufferpool
+
+import "io"
+
+// ByteBuffer provides a byte buffer, which can be used for minimizing
+// memory allocations.
+//
+// ByteBuffer may be used with functions appending data to the given []byte
+// slice. See example code for details.
+//
+// Use Get for obtaining an empty byte buffer.
+type ByteBuffer struct {
+
+	// B is a byte buffer to use in append-like workloads.
+	// See example code for details.
+	B []byte
+}
+
+// Len returns the size of the byte buffer.
+func (b *ByteBuffer) Len() int {
+	return len(b.B)
+}
+
+// ReadFrom implements io.ReaderFrom.
+//
+// The function appends all the data read from r to b.
+func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
+	p := b.B
+	nStart := int64(len(p))
+	nMax := int64(cap(p))
+	n := nStart
+	if nMax == 0 {
+		nMax = 64
+		p = make([]byte, nMax)
+	} else {
+		p = p[:nMax]
+	}
+	for {
+		if n == nMax {
+			nMax *= 2
+			bNew := make([]byte, nMax)
+			copy(bNew, p)
+			p = bNew
+		}
+		nn, err := r.Read(p[n:])
+		n += int64(nn)
+		if err != nil {
+			b.B = p[:n]
+			n -= nStart
+			if err == io.EOF {
+				return n, nil
+			}
+			return n, err
+		}
+	}
+}
+
+// WriteTo implements io.WriterTo.
+func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
+	n, err := w.Write(b.B)
+	return int64(n), err
+}
+
+// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
+//
+// The purpose of this function is bytes.Buffer compatibility.
+func (b *ByteBuffer) Bytes() []byte {
+	return b.B
+}
+
+// Write implements io.Writer - it appends p to ByteBuffer.B
+func (b *ByteBuffer) Write(p []byte) (int, error) {
+	b.B = append(b.B, p...)
+	return len(p), nil
+}
+
+// WriteByte appends the byte c to the buffer.
+//
+// The purpose of this function is bytes.Buffer compatibility.
+//
+// The function always returns nil.
+func (b *ByteBuffer) WriteByte(c byte) error {
+	b.B = append(b.B, c)
+	return nil
+}
+
+// WriteString appends s to ByteBuffer.B.
+func (b *ByteBuffer) WriteString(s string) (int, error) {
+	b.B = append(b.B, s...)
+	return len(s), nil
+}
+
+// Set sets ByteBuffer.B to p.
+func (b *ByteBuffer) Set(p []byte) {
+	b.B = append(b.B[:0], p...)
+}
+
+// SetString sets ByteBuffer.B to s.
+func (b *ByteBuffer) SetString(s string) {
+	b.B = append(b.B[:0], s...)
+}
+
+// String returns string representation of ByteBuffer.B.
+func (b *ByteBuffer) String() string {
+	return string(b.B)
+}
+
+// Reset makes ByteBuffer.B empty.
+func (b *ByteBuffer) Reset() { + b.B = b.B[:0] +} diff --git a/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/bytebufferpool/doc.go new file mode 100644 index 000000000..e511b7c59 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/doc.go @@ -0,0 +1,7 @@ +// Package bytebufferpool implements a pool of byte buffers +// with anti-fragmentation protection. +// +// The pool may waste limited amount of memory due to fragmentation. +// This amount equals to the maximum total size of the byte buffers +// in concurrent use. +package bytebufferpool diff --git a/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/bytebufferpool/pool.go new file mode 100644 index 000000000..8bb4134dd --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/pool.go @@ -0,0 +1,151 @@ +package bytebufferpool + +import ( + "sort" + "sync" + "sync/atomic" +) + +const ( + minBitSize = 6 // 2**6=64 is a CPU cache line size + steps = 20 + + minSize = 1 << minBitSize + maxSize = 1 << (minBitSize + steps - 1) + + calibrateCallsThreshold = 42000 + maxPercentile = 0.95 +) + +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. +// Properly determined byte buffer types with their own pools may help reducing +// memory waste. +type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + pool sync.Pool +} + +var defaultPool Pool + +// Get returns an empty byte buffer from the pool. +// +// Got byte buffer may be returned to the pool via Put call. +// This reduces the number of memory allocations required for byte buffer +// management. +func Get() *ByteBuffer { return defaultPool.Get() } + +// Get returns new byte buffer with zero length. +// +// The byte buffer may be returned to the pool via Put after the use +// in order to minimize GC overhead. +func (p *Pool) Get() *ByteBuffer { + v := p.pool.Get() + if v != nil { + return v.(*ByteBuffer) + } + return &ByteBuffer{ + B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)), + } +} + +// Put returns byte buffer to the pool. +// +// ByteBuffer.B mustn't be touched after returning it to the pool. +// Otherwise data races will occur. +func Put(b *ByteBuffer) { defaultPool.Put(b) } + +// Put releases byte buffer obtained via Get to the pool. +// +// The buffer mustn't be accessed after returning to the pool. 
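Typical use of the Get/Put pair documented above (the Put implementation follows); a minimal sketch against the package's public API:

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	// Get hands out an empty, possibly recycled buffer.
	b := bytebufferpool.Get()
	b.WriteString("hello, ")
	b.WriteString("pool")
	fmt.Println(b.String()) // hello, pool

	// Put resets the buffer and may recycle it; b must not be used afterwards.
	bytebufferpool.Put(b)
}
```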
+func (p *Pool) Put(b *ByteBuffer) { + idx := index(len(b.B)) + + if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold { + p.calibrate() + } + + maxSize := int(atomic.LoadUint64(&p.maxSize)) + if maxSize == 0 || cap(b.B) <= maxSize { + b.Reset() + p.pool.Put(b) + } +} + +func (p *Pool) calibrate() { + if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) { + return + } + + a := make(callSizes, 0, steps) + var callsSum uint64 + for i := uint64(0); i < steps; i++ { + calls := atomic.SwapUint64(&p.calls[i], 0) + callsSum += calls + a = append(a, callSize{ + calls: calls, + size: minSize << i, + }) + } + sort.Sort(a) + + defaultSize := a[0].size + maxSize := defaultSize + + maxSum := uint64(float64(callsSum) * maxPercentile) + callsSum = 0 + for i := 0; i < steps; i++ { + if callsSum > maxSum { + break + } + callsSum += a[i].calls + size := a[i].size + if size > maxSize { + maxSize = size + } + } + + atomic.StoreUint64(&p.defaultSize, defaultSize) + atomic.StoreUint64(&p.maxSize, maxSize) + + atomic.StoreUint64(&p.calibrating, 0) +} + +type callSize struct { + calls uint64 + size uint64 +} + +type callSizes []callSize + +func (ci callSizes) Len() int { + return len(ci) +} + +func (ci callSizes) Less(i, j int) bool { + return ci[i].calls > ci[j].calls +} + +func (ci callSizes) Swap(i, j int) { + ci[i], ci[j] = ci[j], ci[i] +} + +func index(n int) int { + n-- + n >>= minBitSize + idx := 0 + for n > 0 { + n >>= 1 + idx++ + } + if idx >= steps { + idx = steps - 1 + } + return idx +} diff --git a/vendor/github.com/valyala/fasttemplate/LICENSE b/vendor/github.com/valyala/fasttemplate/LICENSE new file mode 100644 index 000000000..7125a63c4 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Aliaksandr Valialkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/valyala/fasttemplate/README.md b/vendor/github.com/valyala/fasttemplate/README.md new file mode 100644 index 000000000..2839ed0f7 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/README.md @@ -0,0 +1,85 @@ +fasttemplate +============ + +Simple and fast template engine for Go. + +Fasttemplate performs only a single task - it substitutes template placeholders +with user-defined values. At high speed :) + +Take a look at [quicktemplate](https://github.com/valyala/quicktemplate) if you need fast yet powerful html template engine. 
+
+*Please note that fasttemplate doesn't do any escaping on template values,
+unlike [html/template](http://golang.org/pkg/html/template/). So values
+must be properly escaped before passing them to fasttemplate.*
+
+Fasttemplate is faster than [text/template](http://golang.org/pkg/text/template/),
+[strings.Replace](http://golang.org/pkg/strings/#Replace),
+[strings.Replacer](http://golang.org/pkg/strings/#Replacer)
+and [fmt.Fprintf](https://golang.org/pkg/fmt/#Fprintf) on placeholders' substitution.
+
+Below are benchmark results comparing fasttemplate performance to text/template,
+strings.Replace, strings.Replacer and fmt.Fprintf:
+
+```
+$ go test -bench=. -benchmem
+PASS
+BenchmarkFmtFprintf-4                      2000000    790 ns/op      0 B/op   0 allocs/op
+BenchmarkStringsReplace-4                   500000   3474 ns/op   2112 B/op  14 allocs/op
+BenchmarkStringsReplacer-4                  500000   2657 ns/op   2256 B/op  23 allocs/op
+BenchmarkTextTemplate-4                     500000   3333 ns/op    336 B/op  19 allocs/op
+BenchmarkFastTemplateExecuteFunc-4         5000000    349 ns/op      0 B/op   0 allocs/op
+BenchmarkFastTemplateExecute-4             3000000    383 ns/op      0 B/op   0 allocs/op
+BenchmarkFastTemplateExecuteFuncString-4   3000000    549 ns/op    144 B/op   1 allocs/op
+BenchmarkFastTemplateExecuteString-4       3000000    572 ns/op    144 B/op   1 allocs/op
+BenchmarkFastTemplateExecuteTagFunc-4      2000000    743 ns/op    144 B/op   3 allocs/op
+```
+
+
+Docs
+====
+
+See http://godoc.org/github.com/valyala/fasttemplate .
+
+
+Usage
+=====
+
+```go
+	template := "http://{{host}}/?q={{query}}&foo={{bar}}{{bar}}"
+	t := fasttemplate.New(template, "{{", "}}")
+	s := t.ExecuteString(map[string]interface{}{
+		"host":  "google.com",
+		"query": url.QueryEscape("hello=world"),
+		"bar":   "foobar",
+	})
+	fmt.Printf("%s", s)
+
+	// Output:
+	// http://google.com/?q=hello%3Dworld&foo=foobarfoobar
+```
+
+
+Advanced usage
+==============
+
+```go
+	template := "Hello, [user]! You won [prize]!!! [foobar]"
+	t, err := fasttemplate.NewTemplate(template, "[", "]")
+	if err != nil {
+		log.Fatalf("unexpected error when parsing template: %s", err)
+	}
+	s := t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
+		switch tag {
+		case "user":
+			return w.Write([]byte("John"))
+		case "prize":
+			return w.Write([]byte("$100500"))
+		default:
+			return w.Write([]byte(fmt.Sprintf("[unknown tag %q]", tag)))
+		}
+	})
+	fmt.Printf("%s", s)
+
+	// Output:
+	// Hello, John! You won $100500!!! [unknown tag "foobar"]
+```
diff --git a/vendor/github.com/valyala/fasttemplate/template.go b/vendor/github.com/valyala/fasttemplate/template.go
new file mode 100644
index 000000000..f2d3261f8
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/template.go
@@ -0,0 +1,436 @@
+// Package fasttemplate implements a simple and fast template library.
+//
+// Fasttemplate is faster than text/template, strings.Replace
+// and strings.Replacer.
+//
+// Fasttemplate is ideal for fast and simple placeholder substitution.
+package fasttemplate
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/valyala/bytebufferpool"
+)
+
+// ExecuteFunc calls f on each template tag (placeholder) occurrence.
+//
+// Returns the number of bytes written to w.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteFunc for frozen templates.
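ExecuteFunc, whose implementation follows, streams the substituted output straight to an io.Writer instead of building the whole result in memory. A minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/valyala/fasttemplate"
)

func main() {
	// Every "{{table}}" occurrence is resolved by the callback and written
	// directly to os.Stdout; no intermediate result string is allocated.
	_, _ = fasttemplate.ExecuteFunc("SELECT * FROM {{table}}", "{{", "}}", os.Stdout,
		func(w io.Writer, tag string) (int, error) {
			return w.Write([]byte("users"))
		})
	fmt.Println()
}
```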
+func ExecuteFunc(template, startTag, endTag string, w io.Writer, f TagFunc) (int64, error) { + s := unsafeString2Bytes(template) + a := unsafeString2Bytes(startTag) + b := unsafeString2Bytes(endTag) + + var nn int64 + var ni int + var err error + for { + n := bytes.Index(s, a) + if n < 0 { + break + } + ni, err = w.Write(s[:n]) + nn += int64(ni) + if err != nil { + return nn, err + } + + s = s[n+len(a):] + n = bytes.Index(s, b) + if n < 0 { + // cannot find end tag - just write it to the output. + ni, _ = w.Write(a) + nn += int64(ni) + break + } + + ni, err = f(w, unsafeBytes2String(s[:n])) + nn += int64(ni) + if err != nil { + return nn, err + } + s = s[n+len(b):] + } + ni, err = w.Write(s) + nn += int64(ni) + + return nn, err +} + +// Execute substitutes template tags (placeholders) with the corresponding +// values from the map m and writes the result to the given writer w. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// Returns the number of bytes written to w. +// +// This function is optimized for constantly changing templates. +// Use Template.Execute for frozen templates. +func Execute(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) { + return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +// ExecuteStd works the same way as Execute, but keeps the unknown placeholders. +// This can be used as a drop-in replacement for strings.Replacer +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// Returns the number of bytes written to w. +// +// This function is optimized for constantly changing templates. +// Use Template.ExecuteStd for frozen templates. +func ExecuteStd(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) { + return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, startTag, endTag, tag, m) }) +} + +// ExecuteFuncString calls f on each template tag (placeholder) occurrence +// and substitutes it with the data written to TagFunc's w. +// +// Returns the resulting string. +// +// This function is optimized for constantly changing templates. +// Use Template.ExecuteFuncString for frozen templates. 
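+//
+// Illustrative example (not upstream documentation): each tag is replaced by
+// whatever the TagFunc writes:
+//
+//	s := ExecuteFuncString("Hello, {user}!", "{", "}",
+//		func(w io.Writer, tag string) (int, error) {
+//			return w.Write([]byte("John"))
+//		})
+//	// s == "Hello, John!"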
+func ExecuteFuncString(template, startTag, endTag string, f TagFunc) string {
+	s, err := ExecuteFuncStringWithErr(template, startTag, endTag, f)
+	if err != nil {
+		panic(fmt.Sprintf("unexpected error: %s", err))
+	}
+	return s
+}
+
+// ExecuteFuncStringWithErr works like ExecuteFuncString, except that it
+// doesn't panic when f returns an error; instead it returns an empty
+// string together with the error returned by f.
+func ExecuteFuncStringWithErr(template, startTag, endTag string, f TagFunc) (string, error) {
+	if n := bytes.Index(unsafeString2Bytes(template), unsafeString2Bytes(startTag)); n < 0 {
+		return template, nil
+	}
+
+	bb := byteBufferPool.Get()
+	if _, err := ExecuteFunc(template, startTag, endTag, bb, f); err != nil {
+		bb.Reset()
+		byteBufferPool.Put(bb)
+		return "", err
+	}
+	s := string(bb.B)
+	bb.Reset()
+	byteBufferPool.Put(bb)
+	return s, nil
+}
+
+var byteBufferPool bytebufferpool.Pool
+
+// ExecuteString substitutes template tags (placeholders) with the corresponding
+// values from the map m and returns the result.
+//
+// Substitution map m may contain values with the following types:
+// * []byte - the fastest value type
+// * string - convenient value type
+// * TagFunc - flexible value type
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteString for frozen templates.
+func ExecuteString(template, startTag, endTag string, m map[string]interface{}) string {
+	return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
+// ExecuteStringStd works the same way as ExecuteString, but keeps the unknown placeholders.
+// This can be used as a drop-in replacement for strings.Replacer.
+//
+// Substitution map m may contain values with the following types:
+// * []byte - the fastest value type
+// * string - convenient value type
+// * TagFunc - flexible value type
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteStringStd for frozen templates.
+func ExecuteStringStd(template, startTag, endTag string, m map[string]interface{}) string {
+	return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, startTag, endTag, tag, m) })
+}
+
+// Template implements a simple template engine, which can be used for fast
+// tag (placeholder) substitution.
+type Template struct {
+	template string
+	startTag string
+	endTag   string
+
+	texts          [][]byte
+	tags           []string
+	byteBufferPool bytebufferpool.Pool
+}
+
+// New parses the given template using the given startTag and endTag
+// as tag start and tag end.
+//
+// The returned template can be executed by concurrently running goroutines
+// using Execute* methods.
+//
+// New panics if the given template cannot be parsed. Use NewTemplate instead
+// if the template may contain errors.
+func New(template, startTag, endTag string) *Template {
+	t, err := NewTemplate(template, startTag, endTag)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// NewTemplate parses the given template using the given startTag and endTag
+// as tag start and tag end.
+//
+// The returned template can be executed by concurrently running goroutines
+// using Execute* methods.
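+//
+// Illustrative example (not upstream documentation): parse once, execute
+// many times:
+//
+//	t, err := NewTemplate("Hello, [name]!", "[", "]")
+//	if err != nil {
+//		// malformed template, e.g. an unclosed tag
+//	}
+//	s := t.ExecuteString(map[string]interface{}{"name": "World"})
+//	// s == "Hello, World!"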
+func NewTemplate(template, startTag, endTag string) (*Template, error) { + var t Template + err := t.Reset(template, startTag, endTag) + if err != nil { + return nil, err + } + return &t, nil +} + +// TagFunc can be used as a substitution value in the map passed to Execute*. +// Execute* functions pass tag (placeholder) name in 'tag' argument. +// +// TagFunc must be safe to call from concurrently running goroutines. +// +// TagFunc must write contents to w and return the number of bytes written. +type TagFunc func(w io.Writer, tag string) (int, error) + +// Reset resets the template t to new one defined by +// template, startTag and endTag. +// +// Reset allows Template object re-use. +// +// Reset may be called only if no other goroutines call t methods at the moment. +func (t *Template) Reset(template, startTag, endTag string) error { + // Keep these vars in t, so GC won't collect them and won't break + // vars derived via unsafe* + t.template = template + t.startTag = startTag + t.endTag = endTag + t.texts = t.texts[:0] + t.tags = t.tags[:0] + + if len(startTag) == 0 { + panic("startTag cannot be empty") + } + if len(endTag) == 0 { + panic("endTag cannot be empty") + } + + s := unsafeString2Bytes(template) + a := unsafeString2Bytes(startTag) + b := unsafeString2Bytes(endTag) + + tagsCount := bytes.Count(s, a) + if tagsCount == 0 { + return nil + } + + if tagsCount+1 > cap(t.texts) { + t.texts = make([][]byte, 0, tagsCount+1) + } + if tagsCount > cap(t.tags) { + t.tags = make([]string, 0, tagsCount) + } + + for { + n := bytes.Index(s, a) + if n < 0 { + t.texts = append(t.texts, s) + break + } + t.texts = append(t.texts, s[:n]) + + s = s[n+len(a):] + n = bytes.Index(s, b) + if n < 0 { + return fmt.Errorf("Cannot find end tag=%q in the template=%q starting from %q", endTag, template, s) + } + + t.tags = append(t.tags, unsafeBytes2String(s[:n])) + s = s[n+len(b):] + } + + return nil +} + +// ExecuteFunc calls f on each template tag (placeholder) occurrence. +// +// Returns the number of bytes written to w. +// +// This function is optimized for frozen templates. +// Use ExecuteFunc for constantly changing templates. +func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) { + var nn int64 + + n := len(t.texts) - 1 + if n == -1 { + ni, err := w.Write(unsafeString2Bytes(t.template)) + return int64(ni), err + } + + for i := 0; i < n; i++ { + ni, err := w.Write(t.texts[i]) + nn += int64(ni) + if err != nil { + return nn, err + } + + ni, err = f(w, t.tags[i]) + nn += int64(ni) + if err != nil { + return nn, err + } + } + ni, err := w.Write(t.texts[n]) + nn += int64(ni) + return nn, err +} + +// Execute substitutes template tags (placeholders) with the corresponding +// values from the map m and writes the result to the given writer w. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// Returns the number of bytes written to w. +func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error) { + return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +// ExecuteStd works the same way as Execute, but keeps the unknown placeholders. 
+// This can be used as a drop-in replacement for strings.Replacer +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// Returns the number of bytes written to w. +func (t *Template) ExecuteStd(w io.Writer, m map[string]interface{}) (int64, error) { + return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, t.startTag, t.endTag, tag, m) }) +} + +// ExecuteFuncString calls f on each template tag (placeholder) occurrence +// and substitutes it with the data written to TagFunc's w. +// +// Returns the resulting string. +// +// This function is optimized for frozen templates. +// Use ExecuteFuncString for constantly changing templates. +func (t *Template) ExecuteFuncString(f TagFunc) string { + s, err := t.ExecuteFuncStringWithErr(f) + if err != nil { + panic(fmt.Sprintf("unexpected error: %s", err)) + } + return s +} + +// ExecuteFuncStringWithErr calls f on each template tag (placeholder) occurrence +// and substitutes it with the data written to TagFunc's w. +// +// Returns the resulting string. +// +// This function is optimized for frozen templates. +// Use ExecuteFuncString for constantly changing templates. +func (t *Template) ExecuteFuncStringWithErr(f TagFunc) (string, error) { + bb := t.byteBufferPool.Get() + if _, err := t.ExecuteFunc(bb, f); err != nil { + bb.Reset() + t.byteBufferPool.Put(bb) + return "", err + } + s := string(bb.Bytes()) + bb.Reset() + t.byteBufferPool.Put(bb) + return s, nil +} + +// ExecuteString substitutes template tags (placeholders) with the corresponding +// values from the map m and returns the result. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// This function is optimized for frozen templates. +// Use ExecuteString for constantly changing templates. +func (t *Template) ExecuteString(m map[string]interface{}) string { + return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +// ExecuteStringStd works the same way as ExecuteString, but keeps the unknown placeholders. +// This can be used as a drop-in replacement for strings.Replacer +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// This function is optimized for frozen templates. +// Use ExecuteStringStd for constantly changing templates. +func (t *Template) ExecuteStringStd(m map[string]interface{}) string { + return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, t.startTag, t.endTag, tag, m) }) +} + +func stdTagFunc(w io.Writer, tag string, m map[string]interface{}) (int, error) { + v := m[tag] + if v == nil { + return 0, nil + } + switch value := v.(type) { + case []byte: + return w.Write(value) + case string: + return w.Write([]byte(value)) + case TagFunc: + return value(w, tag) + default: + panic(fmt.Sprintf("tag=%q contains unexpected value type=%#v. 
Expected []byte, string or TagFunc", tag, v)) + } +} + +func keepUnknownTagFunc(w io.Writer, startTag, endTag, tag string, m map[string]interface{}) (int, error) { + v, ok := m[tag] + if !ok { + if _, err := w.Write(unsafeString2Bytes(startTag)); err != nil { + return 0, err + } + if _, err := w.Write(unsafeString2Bytes(tag)); err != nil { + return 0, err + } + if _, err := w.Write(unsafeString2Bytes(endTag)); err != nil { + return 0, err + } + return len(startTag) + len(tag) + len(endTag), nil + } + if v == nil { + return 0, nil + } + switch value := v.(type) { + case []byte: + return w.Write(value) + case string: + return w.Write([]byte(value)) + case TagFunc: + return value(w, tag) + default: + panic(fmt.Sprintf("tag=%q contains unexpected value type=%#v. Expected []byte, string or TagFunc", tag, v)) + } +} diff --git a/vendor/github.com/valyala/fasttemplate/unsafe.go b/vendor/github.com/valyala/fasttemplate/unsafe.go new file mode 100644 index 000000000..1020ca387 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/unsafe.go @@ -0,0 +1,21 @@ +// +build !appengine + +package fasttemplate + +import ( + "reflect" + "unsafe" +) + +func unsafeBytes2String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func unsafeString2Bytes(s string) (b []byte) { + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = sh.Data + bh.Cap = sh.Len + bh.Len = sh.Len + return b +} diff --git a/vendor/github.com/valyala/fasttemplate/unsafe_gae.go b/vendor/github.com/valyala/fasttemplate/unsafe_gae.go new file mode 100644 index 000000000..cc4ce1516 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/unsafe_gae.go @@ -0,0 +1,11 @@ +// +build appengine + +package fasttemplate + +func unsafeBytes2String(b []byte) string { + return string(b) +} + +func unsafeString2Bytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000..55ede8a42 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
new file mode 100644
index 000000000..00059242c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -0,0 +1,41 @@
+# gojsonpointer
+An implementation of JSON Pointer - Go language
+
+## Usage
+    jsonText := `{
+        "name": "Bobby B",
+        "occupation": {
+            "title" : "King",
+            "years" : 15,
+            "heir" : "Joffrey B"
+        }
+    }`
+
+    var jsonDocument map[string]interface{}
+    json.Unmarshal([]byte(jsonText), &jsonDocument)
+
+    //create a JSON pointer
+    pointerString := "/occupation/title"
+    pointer, _ := NewJsonPointer(pointerString)
+
+    //SET a new value for the "title" in the document
+    pointer.Set(jsonDocument, "Supreme Leader of Westeros")
+
+    //GET the new "title" from the document
+    title, _, _ := pointer.Get(jsonDocument)
+    fmt.Println(title) //outputs "Supreme Leader of Westeros"
+
+    //DELETE the "heir" from the document
+    deletePointer, _ := NewJsonPointer("/occupation/heir")
+    deletePointer.Delete(jsonDocument)
+
+    b, _ := json.Marshal(jsonDocument)
+    fmt.Println(string(b))
+    //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
+
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+### Note
+Section 4 (Evaluation) of the reference above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
new file mode 100644
index 000000000..7faf5d7f9
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
@@ -0,0 +1,211 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonpointer
+// repository-desc  An implementation of JSON Pointer - Go language
+//
+// description      Main and unique file.
+// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +type JsonPointer struct { + referenceTokens []string +} + +// NewJsonPointer parses the given string JSON pointer and returns an object +func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { + + // Pointer to the root of the document + if len(jsonPointerString) == 0 { + // Keep referenceTokens nil + return + } + if jsonPointerString[0] != '/' { + return p, errors.New(const_invalid_start) + } + + p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) + return +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Uses the pointer to delete a value from a JSON document +func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { + is := &implStruct{mode: "DEL", inDocument: document} + p.implementation(is) + return document, is.outError +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + previousNodes := make([]interface{}, len(p.referenceTokens)) + previousTokens := make([]string, len(p.referenceTokens)) + + for ti, token := range p.referenceTokens { + + isLastToken := ti == len(p.referenceTokens)-1 + previousNodes[ti] = node + previousTokens[ti] = token + + switch v := node.(type) { + + case map[string]interface{}: + decodedToken := decodeReferenceToken(token) + if _, ok := v[decodedToken]; ok { + node = v[decodedToken] + if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else if isLastToken && i.mode =="DEL" { + delete(v,decodedToken) + } + } else if (isLastToken && i.mode == "SET") { + v[decodedToken] = i.setInValue + } else { + i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) + i.getOutKind = reflect.Map + i.getOutNode = nil + return + } + + case []interface{}: + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = fmt.Errorf("Invalid array index '%s'", token) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + if tokenIndex < 0 || tokenIndex >= len(v) { + i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + + node = v[tokenIndex] + if isLastToken && i.mode == "SET" { + v[tokenIndex] = i.setInValue + } else if isLastToken && i.mode =="DEL" { + 
v[tokenIndex] = v[len(v)-1] + v[len(v)-1] = nil + v = v[:len(v)-1] + previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v + } + + default: + i.outError = fmt.Errorf("Invalid token reference '%s'", token) + i.getOutKind = reflect.ValueOf(node).Kind() + i.getOutNode = nil + return + } + + } + + i.getOutNode = node + i.getOutKind = reflect.ValueOf(node).Kind() + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~1`, `/`, -1) + step2 := strings.Replace(step1, `~0`, `~`, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~`, `~0`, -1) + step2 := strings.Replace(step1, `/`, `~1`, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000..55ede8a42 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 000000000..9ab6e1eb1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 000000000..645729130 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,147 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "net/url" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonpointer" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + if child.GetUrl() == nil { + return nil, errors.New("childUrl is nil!") + } + + if r.GetUrl() == nil { + return nil, errors.New("parentUrl is nil!") + } + + // Get a copy of the parent url to make sure we do not modify the original. + // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. 
+ // The fragment of the child must be used, so the fragment of the parent is manually removed. + parentUrl := *r.GetUrl() + parentUrl.Fragment = "" + + ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore new file mode 100644 index 000000000..68e993ce3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore @@ -0,0 +1,3 @@ +*.sw[nop] +*.iml +.vscode/ diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml new file mode 100644 index 000000000..3289001cd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - "1.11" + - "1.12" + - "1.13" +before_install: + - go get github.com/xeipuuv/gojsonreference + - go get github.com/xeipuuv/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000..55ede8a42 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md new file mode 100644 index 000000000..758f26df0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/README.md @@ -0,0 +1,466 @@ +[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) +[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) +[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) + +# gojsonschema + +## Description + +An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
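+
+As a quick illustration, a minimal end-to-end validation might look like the sketch below (the inline schema and document are arbitrary examples; loaders and validation are described in detail under Usage):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+	// Schema and document are supplied as inline JSON strings here;
+	// file and HTTP references work the same way via NewReferenceLoader.
+	schemaLoader := gojsonschema.NewStringLoader(`{"type": "integer", "minimum": 0}`)
+	documentLoader := gojsonschema.NewStringLoader(`42`)
+
+	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	fmt.Println("valid:", result.Valid()) // prints "valid: true" for this pair
+}
+```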
+
+References :
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies :
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+    schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+    documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+    result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+    if err != nil {
+        panic(err.Error())
+    }
+
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. See errors:\n")
+        for _, desc := range result.Errors() {
+            fmt.Printf("- %s\n", desc)
+        }
+    }
+}
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader :
+
+* Web / HTTP, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use the URI scheme; the prefix (file://) and a full path to the file are required.
+
+* JSON strings :
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types :
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And
+
+```go
+type Root struct {
+	Users []User `json:"users"`
+}
+
+type User struct {
+	Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy :
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and reuse it for multiple validations :
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result :
+
+```go
+if result.Valid() {
+    fmt.Printf("The document is valid\n")
+} else {
+    fmt.Printf("The document is not valid. See errors:\n")
+    for _, err := range result.Errors() {
+        // Err implements the ResultError interface
+        fmt.Printf("- %s\n", err)
+    }
+}
+```
+
+## Loading local schemas
+
+By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
+err := sl.AddSchema("http://some_host.com/string.json", loader1)
+```
+
+Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function:
+```go
+loader2 := gojsonschema.NewStringLoader(`{
+	"$id" : "http://some_host.com/maxlength.json",
+	"maxLength" : 5
+}`)
+err = sl.AddSchemas(loader2)
+```
+
+The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
+```go
+loader3 := gojsonschema.NewStringLoader(`{
+	"$id" : "http://some_host.com/main.json",
+	"allOf" : [
+		{ "$ref" : "http://some_host.com/string.json" },
+		{ "$ref" : "http://some_host.com/maxlength.json" }
+	]
+}`)
+
+schema, err := sl.Compile(loader3)
+
+documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
+
+result, err := schema.Validate(documentLoader)
+```
+
+It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
+
+```go
+err = sl.AddSchemas(loader3)
+schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
+```
+
+Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
+
+## Using a specific draft
+By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
+
+Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Draft = gojsonschema.Draft7
+sl.AutoDetect = false
+```
+
+If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas.
+
+## Meta-schema validation
+Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` functions can be validated against their meta-schema by setting the `Validate` property.
+
+The following example will produce an error, as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Validate = true
+err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
+	"$id" : "http://some_host.com/invalid.json",
+	"$schema": "http://json-schema.org/draft-07/schema#",
+	"multipleOf" : true
+}`))
+```
+
+Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
+
+Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
+
+
+## Working with Errors
+
+The library handles string error codes, which you can customize by creating your own gojsonschema.locale and setting it:
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+Newer versions of `gojsonschema` may add new error types, so code that uses a custom locale will need to be updated when this happens.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type check; see below.
+
+Note: an error of type RequiredError has an err.Type() return value of "required":
+
+    "required": RequiredError
+    "invalid_type": InvalidTypeError
+    "number_any_of": NumberAnyOfError
+    "number_one_of": NumberOneOfError
+    "number_all_of": NumberAllOfError
+    "number_not": NumberNotError
+    "missing_dependency": MissingDependencyError
+    "internal": InternalError
+    "const": ConstError
+    "enum": EnumError
+    "array_no_additional_items": ArrayNoAdditionalItemsError
+    "array_min_items": ArrayMinItemsError
+    "array_max_items": ArrayMaxItemsError
+    "unique": ItemsMustBeUniqueError
+    "contains": ArrayContainsError
+    "array_min_properties": ArrayMinPropertiesError
+    "array_max_properties": ArrayMaxPropertiesError
+    "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+    "invalid_property_pattern": InvalidPropertyPatternError
+    "invalid_property_name": InvalidPropertyNameError
+    "string_gte": StringLengthGTEError
+    "string_lte": StringLengthLTEError
+    "pattern": DoesNotMatchPatternError
+    "multiple_of": MultipleOfError
+    "number_gte": NumberGTEError
+    "number_gt": NumberGTError
+    "number_lte": NumberLTEError
+    "number_lt": NumberLTError
+    "condition_then": ConditionThenError
+    "condition_else": ConditionElseError
+
+**err.Value()**: *interface{}* Returns the value given.
+
+**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the field name in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overriding the locale with a custom implementation.
+
+**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*.
+
+Note that in most cases err.Details() will be used to generate replacement strings in your locales rather than being used directly. These strings follow the text/template format, e.g.
+```
+{{.field}} must be greater than or equal to {{.min}}
+```
+
+The library allows you to specify custom template functions, should you require more complex error message handling.
+```go
+gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
+	"allcaps": func(s string) string {
+		return strings.ToUpper(s)
+	},
+}
+```
+
+Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
+```
+{{allcaps .field}} must be greater than or equal to {{.min}}
+```
+
+The above error message would then be rendered with the `field` value in capital letters. For example:
+```
+"PASSWORD must be greater than or equal to 8"
+```
+
+Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
+
+## Formats
+JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec, which you can use like this:
+
+```json
+{"type": "string", "format": "email"}
+```
+
+Not all formats defined in draft-07 are available. Implemented formats are:
+
+* `date`
+* `time`
+* `date-time`
+* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
+* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
+* `idn-email`. Same caveat as `email`.
+* `ipv4`
+* `ipv6`
+* `uri`. Includes unicode support.
+* `uri-reference`. Includes unicode support.
+* `iri`
+* `iri-reference`
+* `uri-template`
+* `uuid`
+* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
+* `json-pointer`
+* `relative-json-pointer`
+
+`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support, you should use the specific unicode-enabled formats for the sake of interoperability, as other implementations might not support unicode in the regular formats.
+
+The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code.
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct{}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input interface{}) bool {
+
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    return strings.HasPrefix(asString, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now use it in your JSON schema:
+```json
+{"type": "string", "format": "role"}
+```
+
+Another example would be to check if the provided integer matches an id in the database:
+
+JSON schema:
+```json
+{"type": "integer", "format": "ValidUserId"}
+```
+
+```go
+// Define the format checker
+type ValidUserIdFormatChecker struct{}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
+
+    asFloat64, ok := input.(float64) // Numbers are always float64 here
+    if !ok {
+        return false
+    }
+
+    // look up int(asFloat64) in the database here
+
+    return true
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
+```
+
+Formats can also be removed, for example if you want to override one of the formats that is defined by default.
+
+```go
+gojsonschema.FormatCheckers.Remove("hostname")
+```
+
+
+## Additional custom validation
+After the validation has run and you have the results, you may add additional
+errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
+of having to add special exceptions for your own errors. Below is an example.
+
+```go
+type AnswerInvalidError struct {
+	gojsonschema.ResultErrorFields
+}
+
+func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
+	err := AnswerInvalidError{}
+	err.SetContext(context)
+	err.SetType("custom_invalid_error")
+	// It is important to use SetDescriptionFormat() here: the format is parsed as a
+	// template and used to derive the final description, so a description set
+	// directly on err would be overridden.
+	err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
+	err.SetValue(value)
+	err.SetDetails(details)
+
+	return &err
+}
+
+func validate(schemaLoader, documentLoader gojsonschema.JSONLoader) (*gojsonschema.Result, error) {
+	// A schema can also be compiled once and reused:
+	// schema, err := gojsonschema.NewSchema(schemaLoader)
+	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+	if err != nil {
+		return nil, err
+	}
+
+	if true { // some validation
+		jsonContext := gojsonschema.NewJsonContext("question", nil)
+		errDetail := gojsonschema.ErrorDetails{
+			"answer": 42,
+		}
+		result.AddError(
+			newAnswerInvalidError(
+				gojsonschema.NewJsonContext("answer", jsonContext),
+				52,
+				errDetail,
+			),
+			errDetail,
+		)
+	}
+
+	return result, err
+}
+```
+
+This is especially useful if you want to add validation beyond what the
+JSON Schema drafts can provide, such as business-specific logic.
+
+## Uses
+
+gojsonschema uses the following test suite :
+
+https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go
new file mode 100644
index 000000000..61298e7aa
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/draft.go
@@ -0,0 +1,125 @@
+// Copyright 2018 johandorland ( https://github.com/johandorland )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package gojsonschema + +import ( + "errors" + "math" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +// Draft is a JSON-schema draft version +type Draft int + +// Supported Draft versions +const ( + Draft4 Draft = 4 + Draft6 Draft = 6 + Draft7 Draft = 7 + Hybrid Draft = math.MaxInt32 +) + +type draftConfig struct { + Version Draft + MetaSchemaURL string + MetaSchema string +} +type draftConfigs []draftConfig + +var drafts draftConfigs + +func init() { + drafts = []draftConfig{ + { + Version: Draft4, + MetaSchemaURL: "http://json-schema.org/draft-04/schema", + MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, + }, + { + Version: Draft6, + MetaSchemaURL: "http://json-schema.org/draft-06/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, + }, + { + Version: Draft7, + MetaSchemaURL: "http://json-schema.org/draft-07/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, + }, + } +} + +func (dc draftConfigs) GetMetaSchema(url string) string { + for _, config := range dc { + if config.MetaSchemaURL == url { + return config.MetaSchema + } + } + return "" +} +func (dc draftConfigs) GetDraftVersion(url string) *Draft { + for _, config := range dc { + if config.MetaSchemaURL == url { + return &config.Version + } + } + return nil +} +func (dc draftConfigs) GetSchemaURL(draft Draft) string { + for _, config := range dc { + if config.Version == draft { + return config.MetaSchemaURL + } + } + return "" +} + +func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { + + if isKind(documentNode, reflect.Bool) { + return "", nil, nil + } + + if !isKind(documentNode, reflect.Map) { + return "", nil, errors.New("schema is invalid") + } + + m := documentNode.(map[string]interface{}) + + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], 
reflect.String) { + return "", nil, errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": KEY_SCHEMA, + "type": TYPE_STRING, + }, + )) + } + + schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string)) + + if err != nil { + return "", nil, err + } + + schema := schemaReference.String() + + return schema, drafts.GetDraftVersion(schema), nil + } + + return "", nil, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go new file mode 100644 index 000000000..e4e9814f3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go @@ -0,0 +1,364 @@ +package gojsonschema + +import ( + "bytes" + "sync" + "text/template" +) + +var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}} + +// template.Template is not thread-safe for writing, so some locking is done +// sync.RWMutex is used for efficiently locking when new templates are created +type errorTemplate struct { + *template.Template + sync.RWMutex +} + +type ( + + // FalseError. ErrorDetails: - + FalseError struct { + ResultErrorFields + } + + // RequiredError indicates that a required field is missing + // ErrorDetails: property string + RequiredError struct { + ResultErrorFields + } + + // InvalidTypeError indicates that a field has the incorrect type + // ErrorDetails: expected, given + InvalidTypeError struct { + ResultErrorFields + } + + // NumberAnyOfError is produced in case of a failing "anyOf" validation + // ErrorDetails: - + NumberAnyOfError struct { + ResultErrorFields + } + + // NumberOneOfError is produced in case of a failing "oneOf" validation + // ErrorDetails: - + NumberOneOfError struct { + ResultErrorFields + } + + // NumberAllOfError is produced in case of a failing "allOf" validation + // ErrorDetails: - + NumberAllOfError struct { + ResultErrorFields + } + + // NumberNotError is produced if a "not" validation failed + // ErrorDetails: - + NumberNotError struct { + ResultErrorFields + } + + // MissingDependencyError is produced in case of a "missing dependency" problem + // ErrorDetails: dependency + MissingDependencyError struct { + ResultErrorFields + } + + // InternalError indicates an internal error + // ErrorDetails: error + InternalError struct { + ResultErrorFields + } + + // ConstError indicates a const error + // ErrorDetails: allowed + ConstError struct { + ResultErrorFields + } + + // EnumError indicates an enum error + // ErrorDetails: allowed + EnumError struct { + ResultErrorFields + } + + // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed + // ErrorDetails: - + ArrayNoAdditionalItemsError struct { + ResultErrorFields + } + + // ArrayMinItemsError is produced if an array contains less items than the allowed minimum + // ErrorDetails: min + ArrayMinItemsError struct { + ResultErrorFields + } + + // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum + // ErrorDetails: max + ArrayMaxItemsError struct { + ResultErrorFields + } + + // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items + // ErrorDetails: type, i, j + ItemsMustBeUniqueError struct { + ResultErrorFields + } + + // ArrayContainsError is produced if an array contains invalid items + // ErrorDetails: + ArrayContainsError struct { + ResultErrorFields + } + + // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum + // ErrorDetails: min + 
ArrayMinPropertiesError struct { + ResultErrorFields + } + + // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum + // ErrorDetails: max + ArrayMaxPropertiesError struct { + ResultErrorFields + } + + // AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed + // ErrorDetails: property + AdditionalPropertyNotAllowedError struct { + ResultErrorFields + } + + // InvalidPropertyPatternError is produced if an pattern was found + // ErrorDetails: property, pattern + InvalidPropertyPatternError struct { + ResultErrorFields + } + + // InvalidPropertyNameError is produced if an invalid-named property was found + // ErrorDetails: property + InvalidPropertyNameError struct { + ResultErrorFields + } + + // StringLengthGTEError is produced if a string is shorter than the minimum required length + // ErrorDetails: min + StringLengthGTEError struct { + ResultErrorFields + } + + // StringLengthLTEError is produced if a string is longer than the maximum allowed length + // ErrorDetails: max + StringLengthLTEError struct { + ResultErrorFields + } + + // DoesNotMatchPatternError is produced if a string does not match the defined pattern + // ErrorDetails: pattern + DoesNotMatchPatternError struct { + ResultErrorFields + } + + // DoesNotMatchFormatError is produced if a string does not match the defined format + // ErrorDetails: format + DoesNotMatchFormatError struct { + ResultErrorFields + } + + // MultipleOfError is produced if a number is not a multiple of the defined multipleOf + // ErrorDetails: multiple + MultipleOfError struct { + ResultErrorFields + } + + // NumberGTEError is produced if a number is lower than the allowed minimum + // ErrorDetails: min + NumberGTEError struct { + ResultErrorFields + } + + // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set + // ErrorDetails: min + NumberGTError struct { + ResultErrorFields + } + + // NumberLTEError is produced if a number is higher than the allowed maximum + // ErrorDetails: max + NumberLTEError struct { + ResultErrorFields + } + + // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set + // ErrorDetails: max + NumberLTError struct { + ResultErrorFields + } + + // ConditionThenError is produced if a condition's "then" validation is invalid + // ErrorDetails: - + ConditionThenError struct { + ResultErrorFields + } + + // ConditionElseError is produced if a condition's "else" condition is invalid + // ErrorDetails: - + ConditionElseError struct { + ResultErrorFields + } +) + +// newError takes a ResultError type and sets the type, context, description, details, value, and field +func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) { + var t string + var d string + switch err.(type) { + case *FalseError: + t = "false" + d = locale.False() + case *RequiredError: + t = "required" + d = locale.Required() + case *InvalidTypeError: + t = "invalid_type" + d = locale.InvalidType() + case *NumberAnyOfError: + t = "number_any_of" + d = locale.NumberAnyOf() + case *NumberOneOfError: + t = "number_one_of" + d = locale.NumberOneOf() + case *NumberAllOfError: + t = "number_all_of" + d = locale.NumberAllOf() + case *NumberNotError: + t = "number_not" + d = locale.NumberNot() + case *MissingDependencyError: + t = "missing_dependency" + d = locale.MissingDependency() + case *InternalError: + 
t = "internal" + d = locale.Internal() + case *ConstError: + t = "const" + d = locale.Const() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayContainsError: + t = "contains" + d = locale.ArrayContains() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *InvalidPropertyNameError: + t = "invalid_property_name" + d = locale.InvalidPropertyName() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + case *ConditionThenError: + t = "condition_then" + d = locale.ConditionThen() + case *ConditionElseError: + t = "condition_else" + d = locale.ConditionElse() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + err.SetDescriptionFormat(d) + details["field"] = err.Field() + + if _, exists := details["context"]; !exists && context != nil { + details["context"] = context.String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) +} + +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. 
+
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+	var tpl *template.Template
+	var descrAsBuffer bytes.Buffer
+	var err error
+
+	errorTemplates.RLock()
+	tpl = errorTemplates.Lookup(s)
+	errorTemplates.RUnlock()
+
+	if tpl == nil {
+		errorTemplates.Lock()
+		tpl = errorTemplates.New(s)
+
+		if ErrorTemplateFuncs != nil {
+			tpl.Funcs(ErrorTemplateFuncs)
+		}
+
+		tpl, err = tpl.Parse(s)
+		errorTemplates.Unlock()
+
+		if err != nil {
+			return err.Error()
+		}
+	}
+
+	err = tpl.Execute(&descrAsBuffer, details)
+	if err != nil {
+		return err.Error()
+	}
+
+	return descrAsBuffer.String()
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
new file mode 100644
index 000000000..873ffc7d7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
@@ -0,0 +1,368 @@
+package gojsonschema
+
+import (
+	"net"
+	"net/mail"
+	"net/url"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+)
+
+type (
+	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+	FormatChecker interface {
+		// IsFormat checks if input has the correct format and type
+		IsFormat(input interface{}) bool
+	}
+
+	// FormatCheckerChain holds the formatters
+	FormatCheckerChain struct {
+		formatters map[string]FormatChecker
+	}
+
+	// EmailFormatChecker verifies email address formats
+	EmailFormatChecker struct{}
+
+	// IPV4FormatChecker verifies IP addresses in the IPv4 format
+	IPV4FormatChecker struct{}
+
+	// IPV6FormatChecker verifies IP addresses in the IPv6 format
+	IPV6FormatChecker struct{}
+
+	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+	//
+	// Valid formats:
+	//	Partial Time: HH:MM:SS
+	//	Full Date: YYYY-MM-DD
+	//	Full Time: HH:MM:SSZ-07:00
+	//	Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
+	//
+	// Where
+	//	YYYY = 4DIGIT year
+	//	MM = 2DIGIT month ; 01-12
+	//	DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	//	HH = 2DIGIT hour ; 00-23
+	//	MM = 2DIGIT ; 00-59
+	//	SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//	T = Literal
+	//	Z = Literal
+	//
+	// Note: Nanoseconds are also supported in all formats
+	//
+	// http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimeFormatChecker struct{}
+
+	// DateFormatChecker verifies date formats
+	//
+	// Valid format:
+	//	Full Date: YYYY-MM-DD
+	//
+	// Where
+	//	YYYY = 4DIGIT year
+	//	MM = 2DIGIT month ; 01-12
+	//	DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	DateFormatChecker struct{}
+
+	// TimeFormatChecker verifies time formats
+	//
+	// Valid formats:
+	//	Partial Time: HH:MM:SS
+	//	Full Time: HH:MM:SSZ-07:00
+	//
+	// Where
+	//	HH = 2DIGIT hour ; 00-23
+	//	MM = 2DIGIT ; 00-59
+	//	SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//	T = Literal
+	//	Z = Literal
+	TimeFormatChecker struct{}
+
+	// URIFormatChecker validates a URI with a valid Scheme per RFC3986
+	URIFormatChecker struct{}
+
+	// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
+	URIReferenceFormatChecker struct{}
+
+	// URITemplateFormatChecker validates a URI template per RFC6570
+	URITemplateFormatChecker struct{}
+
+	// HostnameFormatChecker validates a hostname is in the correct format
+	HostnameFormatChecker struct{}
+
+	// UUIDFormatChecker validates a UUID is in the correct format
+	UUIDFormatChecker struct{}
+
+	// RegexFormatChecker validates a regex is in the correct format
+	RegexFormatChecker struct{}
+
+	// JSONPointerFormatChecker validates a JSON Pointer per RFC6901
+	JSONPointerFormatChecker struct{}
+
+	// RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
+	RelativeJSONPointerFormatChecker struct{}
+)
+
+var (
+	// FormatCheckers holds the valid formatters, and is a public variable
+	// so library users can add custom formatters
+	FormatCheckers = FormatCheckerChain{
+		formatters: map[string]FormatChecker{
+			"date":                  DateFormatChecker{},
+			"time":                  TimeFormatChecker{},
+			"date-time":             DateTimeFormatChecker{},
+			"hostname":              HostnameFormatChecker{},
+			"email":                 EmailFormatChecker{},
+			"idn-email":             EmailFormatChecker{},
+			"ipv4":                  IPV4FormatChecker{},
+			"ipv6":                  IPV6FormatChecker{},
+			"uri":                   URIFormatChecker{},
+			"uri-reference":         URIReferenceFormatChecker{},
+			"iri":                   URIFormatChecker{},
+			"iri-reference":         URIReferenceFormatChecker{},
+			"uri-template":          URITemplateFormatChecker{},
+			"uuid":                  UUIDFormatChecker{},
+			"regex":                 RegexFormatChecker{},
+			"json-pointer":          JSONPointerFormatChecker{},
+			"relative-json-pointer": RelativeJSONPointerFormatChecker{},
+		},
+	}
+
+	// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+	rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+	// Use a regex to make sure curly brackets are balanced properly after validating it as a URI
+	rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
+
+	rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+
+	rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
+
+	rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
+
+	lock = new(sync.RWMutex)
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value used for the format key in your json schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+	lock.Lock()
+	c.formatters[name] = f
+	lock.Unlock()
+
+	return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+	lock.Lock()
+	delete(c.formatters, name)
+	lock.Unlock()
+
+	return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+	lock.RLock()
+	_, ok := c.formatters[name]
+	lock.RUnlock()
+
+	return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+	lock.RLock()
+	f, ok := c.formatters[name]
+	lock.RUnlock()
+
+	// If a format is unrecognized it should always pass validation
+	if !ok {
+		return true
+	}
+
+	return f.IsFormat(input)
+}
+
+// IsFormat checks if input is a correctly formatted e-mail address
+func (f EmailFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	_, err := mail.ParseAddress(asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted IPv4-address
+func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	// Credit: https://github.com/asaskevich/govalidator
+	ip := net.ParseIP(asString)
+	return ip != nil && strings.Contains(asString, ".")
+}
+
+// IsFormat checks if input is a correctly formatted IPv6-address
+func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	// Credit: https://github.com/asaskevich/govalidator
+	ip := net.ParseIP(asString)
+	return ip != nil && strings.Contains(asString, ":")
+}
+
+// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
+func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	formats := []string{
+		"15:04:05",
+		"15:04:05Z07:00",
+		"2006-01-02",
+		time.RFC3339,
+		time.RFC3339Nano,
+	}
+
+	for _, format := range formats {
+		if _, err := time.Parse(format, asString); err == nil {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
+func (f DateFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+	_, err := time.Parse("2006-01-02", asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
+func (f TimeFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
+		return true
+	}
+
+	_, err := time.Parse("15:04:05", asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted URI with a valid Scheme per RFC3986
+func (f URIFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	u, err := url.Parse(asString)
+
+	if err != nil || u.Scheme == "" {
+		return false
+	}
+
+	return !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
+func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	_, err := url.Parse(asString)
+	return err == nil && !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI template per RFC6570
+func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	u, err := url.Parse(asString)
+	if err != nil || strings.Contains(asString, `\`) {
+		return false
+	}
+
+	return rxURITemplate.MatchString(u.Path)
+}
+
+// IsFormat checks if input is a correctly formatted hostname
+func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	return rxHostname.MatchString(asString) && len(asString) < 256
+}
+
+// IsFormat checks if input is a correctly formatted UUID
+func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	return rxUUID.MatchString(asString)
+}
+
+// IsFormat checks if input is a correctly formatted regular expression
+func (f RegexFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	if asString == "" {
+		return true
+	}
+	_, err := regexp.Compile(asString)
+	return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
+func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
+	asString, ok := input.(string)
+	if !ok {
+		return false
+	}
+
+	return rxJSONPointer.MatchString(asString)
+}
+
+// IsFormat checks if
input is a correctly formatted relative JSON Pointer +func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxRelJSONPointer.MatchString(asString) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml new file mode 100644 index 000000000..ab6fb867c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml @@ -0,0 +1,13 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 000000000..4ef7a8d03 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 000000000..0e979707b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,73 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
+// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// JsonContext implements a persistent linked-list of strings +type JsonContext struct { + head string + tail *JsonContext +} + +// NewJsonContext creates a new JsonContext +func NewJsonContext(head string, tail *JsonContext) *JsonContext { + return &JsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *JsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *JsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 000000000..5d88af263 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,386 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. 
+// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSONLoader defines the JSON loader interface +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +// JSONLoaderFactory defines the JSON loader factory interface +type JSONLoaderFactory interface { + // New creates a new JSON loader for the given source + New(source string) JSONLoader +} + +// DefaultJSONLoaderFactory is the default JSON loader factory +type DefaultJSONLoaderFactory struct { +} + +// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +// New creates a new JSON loader for the given source +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// New creates a new JSON loader for the given source +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +// Opens a file with the given name +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToURL := reference + refToURL.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.TrimPrefix(refToURL.String(), "file://") + filename, err = url.QueryUnescape(filename) + + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + filename = strings.TrimPrefix(filename, "/") + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToURL.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + // returned cached versions for metaschemas for drafts 4, 6 and 7 + // for performance and allow for easier offline use + if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { + return decodeJSONUsingNumber(strings.NewReader(metaSchema)) + } + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewStringLoader creates a new JSONLoader, taking a string as source +func NewStringLoader(source string) JSONLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source +func NewBytesLoader(source []byte) JSONLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return 
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... + +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewGoLoader creates a new JSONLoader from a given Go struct +func NewGoLoader(source interface{}) JSONLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +// NewReaderLoader creates a new JSON loader using the provided io.Reader +func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +// NewWriterLoader creates a new JSON loader using the provided io.Writer +func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJSONUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// JSON raw loader +// In case the JSON is already marshalled to interface{} use this loader +// This is used for testing as otherwise there is no guarantee the JSON is marshalled +// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber +type jsonRawLoader struct { + source interface{} +} + +// NewRawLoader creates a new JSON raw loader for the given source +func NewRawLoader(source interface{}) JSONLoader { + return &jsonRawLoader{source: source} +} +func (l *jsonRawLoader) JsonSource() interface{} { + return l.source +} +func (l *jsonRawLoader) LoadJSON() (interface{}, error) { + return l.source, nil +} +func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} +func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 000000000..a416225cd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,472 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. +// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + + // False returns a format-string for "false" schema validation errors + False() string + + // Required returns a format-string for "required" schema validation errors + Required() string + + // InvalidType returns a format-string for "invalid type" schema validation errors + InvalidType() string + + // NumberAnyOf returns a format-string for "anyOf" schema validation errors + NumberAnyOf() string + + // NumberOneOf returns a format-string for "oneOf" schema validation errors + NumberOneOf() string + + // NumberAllOf returns a format-string for "allOf" schema validation errors + NumberAllOf() string + + // NumberNot returns a format-string to format a NumberNotError + NumberNot() string + + // MissingDependency returns a format-string for "missing dependency" schema validation errors + MissingDependency() string + + // Internal returns a format-string for internal errors + Internal() string + + // Const returns a format-string to format a ConstError + Const() string + + // Enum returns a format-string to format an EnumError + Enum() string + + // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema + ArrayNotEnoughItems() string + + // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError + ArrayNoAdditionalItems() string + + // ArrayMinItems returns a format-string to format an ArrayMinItemsError + ArrayMinItems() string + + // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError + ArrayMaxItems() string + + // Unique returns a format-string to format an ItemsMustBeUniqueError + Unique() string + + // ArrayContains returns a format-string to format an ArrayContainsError + ArrayContains() string + + // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError + ArrayMinProperties() string + + // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError + ArrayMaxProperties() string + + // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError + AdditionalPropertyNotAllowed() string + + // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError + InvalidPropertyPattern() string + + // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError + InvalidPropertyName() string + + // StringGTE returns a format-string to format an StringLengthGTEError + StringGTE() string + + // StringLTE returns a format-string to format an StringLengthLTEError + StringLTE() string + + // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError + DoesNotMatchPattern() string 
+ + // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError + DoesNotMatchFormat() string + + // MultipleOf returns a format-string to format an MultipleOfError + MultipleOf() string + + // NumberGTE returns a format-string to format an NumberGTEError + NumberGTE() string + + // NumberGT returns a format-string to format an NumberGTError + NumberGT() string + + // NumberLTE returns a format-string to format an NumberLTEError + NumberLTE() string + + // NumberLT returns a format-string to format an NumberLTError + NumberLT() string + + // Schema validations + + // RegexPattern returns a format-string to format a regex-pattern error + RegexPattern() string + + // GreaterThanZero returns a format-string to format an error where a number must be greater than zero + GreaterThanZero() string + + // MustBeOfA returns a format-string to format an error where a value is of the wrong type + MustBeOfA() string + + // MustBeOfAn returns a format-string to format an error where a value is of the wrong type + MustBeOfAn() string + + // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error + CannotBeUsedWithout() string + + // CannotBeGT returns a format-string to format an error where a value are greater than allowed + CannotBeGT() string + + // MustBeOfType returns a format-string to format an error where a value does not match the required type + MustBeOfType() string + + // MustBeValidRegex returns a format-string to format an error where a regex is invalid + MustBeValidRegex() string + + // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format + MustBeValidFormat() string + + // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 + MustBeGTEZero() string + + // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed + KeyCannotBeGreaterThan() string + + // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type + KeyItemsMustBeOfType() string + + // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique + KeyItemsMustBeUnique() string + + // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error + ReferenceMustBeCanonical() string + + // NotAValidType returns a format-string to format an invalid type error + NotAValidType() string + + // Duplicated returns a format-string to format an error where types are duplicated + Duplicated() string + + // HttpBadStatus returns a format-string for errors when loading a schema using HTTP + HttpBadStatus() string + + // ParseError returns a format-string for JSON parsing errors + ParseError() string + + // ConditionThen returns a format-string for ConditionThenError errors + ConditionThen() string + + // ConditionElse returns a format-string for ConditionElseError errors + ConditionElse() string + + // ErrorFormat returns a format string for errors + ErrorFormat() string + } + + // DefaultLocale is the default locale for this package + DefaultLocale struct{} +) + +// False returns a format-string for "false" schema validation errors +func (l DefaultLocale) False() string { + return "False always fails validation" +} + +// Required returns a format-string for "required" schema validation errors +func (l DefaultLocale) Required() string { + return `{{.property}} is required` +} + +// InvalidType returns a format-string for 
"invalid type" schema validation errors +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` +} + +// NumberAnyOf returns a format-string for "anyOf" schema validation errors +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +// NumberOneOf returns a format-string for "oneOf" schema validation errors +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +// NumberAllOf returns a format-string for "allOf" schema validation errors +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +// NumberNot returns a format-string to format a NumberNotError +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +// MissingDependency returns a format-string for "missing dependency" schema validation errors +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on {{.dependency}}` +} + +// Internal returns a format-string for internal errors +func (l DefaultLocale) Internal() string { + return `Internal Error {{.error}}` +} + +// Const returns a format-string to format a ConstError +func (l DefaultLocale) Const() string { + return `{{.field}} does not match: {{.allowed}}` +} + +// Enum returns a format-string to format an EnumError +func (l DefaultLocale) Enum() string { + return `{{.field}} must be one of the following: {{.allowed}}` +} + +// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + +// ArrayMinItems returns a format-string to format an ArrayMinItemsError +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least {{.min}} items` +} + +// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most {{.max}} items` +} + +// Unique returns a format-string to format an ItemsMustBeUniqueError +func (l DefaultLocale) Unique() string { + return `{{.type}} items[{{.i}},{{.j}}] must be unique` +} + +// ArrayContains returns a format-string to format an ArrayContainsError +func (l DefaultLocale) ArrayContains() string { + return `At least one of the items must match` +} + +// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least {{.min}} properties` +} + +// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most {{.max}} properties` +} + +// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property {{.property}} is not allowed` +} + +// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "{{.property}}" does not match pattern {{.pattern}}` +} 
+ +// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError +func (l DefaultLocale) InvalidPropertyName() string { + return `Property name of "{{.property}}" does not match` +} + +// StringGTE returns a format-string to format an StringLengthGTEError +func (l DefaultLocale) StringGTE() string { + return `String length must be greater than or equal to {{.min}}` +} + +// StringLTE returns a format-string to format an StringLengthLTEError +func (l DefaultLocale) StringLTE() string { + return `String length must be less than or equal to {{.max}}` +} + +// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '{{.pattern}}'` +} + +// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError +func (l DefaultLocale) DoesNotMatchFormat() string { + return `Does not match format '{{.format}}'` +} + +// MultipleOf returns a format-string to format an MultipleOfError +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of {{.multiple}}` +} + +// NumberGTE returns the format string to format a NumberGTEError +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to {{.min}}` +} + +// NumberGT returns the format string to format a NumberGTError +func (l DefaultLocale) NumberGT() string { + return `Must be greater than {{.min}}` +} + +// NumberLTE returns the format string to format a NumberLTEError +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to {{.max}}` +} + +// NumberLT returns the format string to format a NumberLTError +func (l DefaultLocale) NumberLT() string { + return `Must be less than {{.max}}` +} + +// Schema validators + +// RegexPattern returns a format-string to format a regex-pattern error +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '{{.pattern}}'` +} + +// GreaterThanZero returns a format-string to format an error where a number must be greater than zero +func (l DefaultLocale) GreaterThanZero() string { + return `{{.number}} must be strictly greater than 0` +} + +// MustBeOfA returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfA() string { + return `{{.x}} must be of a {{.y}}` +} + +// MustBeOfAn returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfAn() string { + return `{{.x}} must be of an {{.y}}` +} + +// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error +func (l DefaultLocale) CannotBeUsedWithout() string { + return `{{.x}} cannot be used without {{.y}}` +} + +// CannotBeGT returns a format-string to format an error where a value are greater than allowed +func (l DefaultLocale) CannotBeGT() string { + return `{{.x}} cannot be greater than {{.y}}` +} + +// MustBeOfType returns a format-string to format an error where a value does not match the required type +func (l DefaultLocale) MustBeOfType() string { + return `{{.key}} must be of type {{.type}}` +} + +// MustBeValidRegex returns a format-string to format an error where a regex is invalid +func (l DefaultLocale) MustBeValidRegex() string { + return `{{.key}} must be a valid regex` +} + +// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format +func (l DefaultLocale) MustBeValidFormat() string { + return `{{.key}} must be a valid format 
{{.given}}` +} + +// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 +func (l DefaultLocale) MustBeGTEZero() string { + return `{{.key}} must be greater than or equal to 0` +} + +// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `{{.key}} cannot be greater than {{.y}}` +} + +// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `{{.key}} items must be {{.type}}` +} + +// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `{{.key}} items must be unique` +} + +// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference {{.reference}} must be canonical` +} + +// NotAValidType returns a format-string to format an invalid type error +func (l DefaultLocale) NotAValidType() string { + return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` +} + +// Duplicated returns a format-string to format an error where types are duplicated +func (l DefaultLocale) Duplicated() string { + return `{{.type}} type is duplicated` +} + +// HttpBadStatus returns a format-string for errors when loading a schema using HTTP +func (l DefaultLocale) HttpBadStatus() string { + return `Could not read schema from HTTP, response status is {{.status}}` +} + +// ErrorFormat returns a format string for errors +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `{{.field}}: {{.description}}` +} + +// ParseError returns a format-string for JSON parsing errors +func (l DefaultLocale) ParseError() string { + return `Expected: {{.expected}}, given: Invalid JSON` +} + +// ConditionThen returns a format-string for ConditionThenError errors +// If/Else +func (l DefaultLocale) ConditionThen() string { + return `Must validate "then" as "if" was valid` +} + +// ConditionElse returns a format-string for ConditionElseError errors +func (l DefaultLocale) ConditionElse() string { + return `Must validate "else" as "if" was not valid` +} + +// constants +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "valid schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES = "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go new file mode 100644 index 000000000..0a0179148 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -0,0 +1,220 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + // Field returns the field name without the root context + // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName + Field() string + // SetType sets the error-type + SetType(string) + // Type returns the error-type + Type() string + // SetContext sets the JSON-context for the error + SetContext(*JsonContext) + // Context returns the JSON-context of the error + Context() *JsonContext + // SetDescription sets a description for the error + SetDescription(string) + // Description returns the description of the error + Description() string + // SetDescriptionFormat sets the format for the description in the default text/template format + SetDescriptionFormat(string) + // DescriptionFormat returns the format for the description in the default text/template format + DescriptionFormat() string + // SetValue sets the value related to the error + SetValue(interface{}) + // Value returns the value related to the error + Value() interface{} + // SetDetails sets the details specific to the error + SetDetails(ErrorDetails) + // Details returns details about the error + Details() ErrorDetails + // String returns a string representation of the error + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + descriptionFormat string // A format for human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + // Result holds the result of a validation + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field returns the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName +func (v *ResultErrorFields) Field() string { + return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") +} + +// SetType sets the error-type +func (v *ResultErrorFields) SetType(errorType string) { + v.errorType = errorType +} + +// Type returns the error-type +func (v *ResultErrorFields) Type() string { + return v.errorType +} + +// SetContext sets the JSON-context for the error +func (v *ResultErrorFields) SetContext(context *JsonContext) { + v.context = context +} + +// Context returns the JSON-context of the error +func (v *ResultErrorFields) Context() *JsonContext { + return v.context +} + +// SetDescription sets a description for the error +func (v *ResultErrorFields) SetDescription(description string) { + v.description = description +} + +// Description returns the description of the error +func (v *ResultErrorFields) Description() string { + return v.description +} + +// SetDescriptionFormat sets the format for the description in the default text/template format +func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) { + v.descriptionFormat = descriptionFormat +} + +// DescriptionFormat returns the format for the description in the default text/template format +func (v *ResultErrorFields) DescriptionFormat() string { + return v.descriptionFormat +} + +// SetValue sets the value related to the error +func (v *ResultErrorFields) SetValue(value interface{}) { + v.value = value +} + +// Value returns the value related to the error +func (v *ResultErrorFields) Value() interface{} { + return v.value +} + +// SetDetails sets the details specific to the error +func (v *ResultErrorFields) SetDetails(details ErrorDetails) { + v.details = details +} + +// Details returns details about the error +func (v *ResultErrorFields) Details() ErrorDetails { + return v.details +} + +// String returns a string representation of the error +func (v ResultErrorFields) String() string { + // as a fallback, the value is displayed go style + valueString := fmt.Sprintf("%v", v.value) + + // marshal the go value value to json + if v.value == nil { + valueString = TYPE_NULL + } else { + if vs, err := marshalToJSONString(v.value); err == nil { + if vs == nil { + valueString = TYPE_NULL + } else { + valueString = *vs + } + } + } + + return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ + "context": v.context.String(), + "description": v.description, + "value": valueString, + "field": v.Field(), + }) +} + +// Valid indicates if no errors were found +func (v *Result) Valid() bool { + return len(v.errors) == 0 +} + +// Errors returns the errors that were found +func (v *Result) Errors() []ResultError { + return v.errors +} + +// AddError appends a fully filled error to the error set +// SetDescription() will be called with the result of the parsed err.DescriptionFormat() +func (v *Result) AddError(err ResultError, details ErrorDetails) { + if _, exists := details["context"]; !exists && err.Context() != nil { + details["context"] = err.Context().String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) + + v.errors = append(v.errors, err) +} + +func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) { + newError(err, context, value, Locale, details) + v.errors = append(v.errors, err) + v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function +} 
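+
+// Illustrative consumption sketch (caller-side code, not part of this file):
+//
+//	if !result.Valid() {
+//		for _, e := range result.Errors() {
+//			fmt.Printf("%s: %s\n", e.Field(), e.Description())
+//		}
+//	}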
+ +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 000000000..9e93cd795 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,1087 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "math/big" + "reflect" + "regexp" + "text/template" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} + + // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. + ErrorTemplateFuncs template.FuncMap +) + +// NewSchema instances a schema using the given JSONLoader +func NewSchema(l JSONLoader) (*Schema, error) { + return NewSchemaLoader().Compile(l) +} + +// Schema holds a schema +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}, draft Draft) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} + return d.parseSchema(document, d.rootSchema) +} + +// SetRootSchemaName sets the root-schema name +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if currentSchema.draft == nil { + if currentSchema.parent == nil { + return errors.New("Draft not set") + } + currentSchema.draft = currentSchema.parent.draft + } + + // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" + if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { + b := documentNode.(bool) + currentSchema.pass = &b + return nil + } + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.ParseError(), + ErrorDetails{ + "expected": STRING_SCHEMA, + }, + )) + } + + m := documentNode.(map[string]interface{}) + + if currentSchema.parent == nil { + currentSchema.ref = &d.documentReference + currentSchema.id = &d.documentReference + } + + if currentSchema.id == nil && currentSchema.parent != nil { + currentSchema.id = currentSchema.parent.id + } + + // In draft 6 the id keyword was renamed to $id + // Hybrid mode uses the old id by default + var keyID string + + switch *currentSchema.draft { + case Draft4: + keyID = KEY_ID + case Hybrid: + keyID = KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + default: + keyID = KEY_ID_NEW + } + if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": keyID, + }, + )) + } + if k, ok := m[keyID].(string); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + if currentSchema == d.rootSchema { + currentSchema.id = &jsonReference + } else { + ref, err := currentSchema.parent.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.id = ref + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { + for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map, reflect.Bool) { + + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} + + err := d.parseSchema(dv, newSchema) + + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + 
Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + + if k, ok := m[KEY_REF].(string); ok { + + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + currentSchema.ref = &jsonReference + + if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { + currentSchema.refSchema = sch + } else { + err := d.parseReference(documentNode, currentSchema) + + if err != nil { + return err + } + + return nil + } + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + if err := currentSchema.types.Add(typeInArray.(string)); err != nil { + return err + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // propertyNames + if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { + if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertyNames = newSchema + err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // dependencies + if existsMapKey(m, KEY_DEPENDENCIES) { + err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) + if err != nil { + return err + } + } + + // items + if existsMapKey(m, KEY_ITEMS) { + if isKind(m[KEY_ITEMS], reflect.Slice) { + for _, itemElement := range m[KEY_ITEMS].([]interface{}) { + if isKind(itemElement, reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(itemElement, newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + currentSchema.itemsChildrenIsSingleSchema = false + } + } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(m[KEY_ITEMS], newSchema) + if err != nil { + return err + } + currentSchema.itemsChildrenIsSingleSchema = true + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + } + + // additionalItems + if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { + if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { + currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) + } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalItems = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_ITEMS, + }, + )) + } + } + + // validation : number / integer + + if existsMapKey(m, KEY_MULTIPLE_OF) { + multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) + if multipleOfValue == nil { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_NUMBER, + "given": KEY_MULTIPLE_OF, + }, + )) + } + if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { + return errors.New(formatErrorDescription( + Locale.GreaterThanZero(), + ErrorDetails{"number": KEY_MULTIPLE_OF}, + )) + } + currentSchema.multipleOf = multipleOfValue + } + + if existsMapKey(m, KEY_MINIMUM) { + minimumValue := mustBeNumber(m[KEY_MINIMUM]) + if minimumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.minimum = minimumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + 
)) + } + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if 
*minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if !ok { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, + )) + } + currentSchema.format = formatString + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + if isStringInSlice(currentSchema.required, requiredValue.(string)) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + currentSchema.required = append(currentSchema.required, requiredValue.(string)) + } else 
{ + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { + newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.contains = newSchema + err := d.parseSchema(m[KEY_CONTAINS], newSchema) + if err != nil { + return err + } + } + + // validation : all + + if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { + is, err := marshalWithoutNumber(m[KEY_CONST]) + if err != nil { + return err + } + currentSchema._const = is + } + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + is, err := marshalWithoutNumber(v) + if err != nil { + return err + } + if isStringInSlice(currentSchema.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + currentSchema.enum = append(currentSchema.enum, *is) + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.oneOf = append(currentSchema.oneOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.anyOf = append(currentSchema.anyOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return 
errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ALL_OF) { + if isKind(m[KEY_ALL_OF], reflect.Slice) { + for _, v := range m[KEY_ALL_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.allOf = append(currentSchema.allOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_NOT) { + if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} + currentSchema.not = newSchema + err := d.parseSchema(m[KEY_NOT], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, + )) + } + } + + if *currentSchema.draft >= Draft7 { + if existsMapKey(m, KEY_IF) { + if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} + currentSchema._if = newSchema + err := d.parseSchema(m[KEY_IF], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_THEN) { + if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} + currentSchema._then = newSchema + err := d.parseSchema(m[KEY_THEN], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_ELSE) { + if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} + currentSchema._else = newSchema + err := d.parseSchema(m[KEY_ELSE], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, + )) + } + } + } + + return nil +} + +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { + var ( + refdDocumentNode interface{} + dsp *schemaPoolDocument + err error + ) + + newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} + + d.referencePool.Add(currentSchema.ref.String(), newSchema) + + dsp, err = d.pool.GetDocument(*currentSchema.ref) + if err != nil { + return err + } + newSchema.id = currentSchema.ref + + refdDocumentNode = dsp.Document + newSchema.draft = dsp.Draft + + if err != nil { + return err + } + + if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, + )) + } + + err = d.parseSchema(refdDocumentNode, newSchema) + if err != nil { + return err + } + + currentSchema.refSchema = newSchema + + return nil + +} + +func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + 
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + valuesToRegister = append(valuesToRegister, value.(string)) + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map, reflect.Bool: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go new file mode 100644 index 000000000..20db0c1f9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go @@ -0,0 +1,206 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gojsonschema + +import ( + "bytes" + "errors" + + "github.com/xeipuuv/gojsonreference" +) + +// SchemaLoader is used to load schemas +type SchemaLoader struct { + pool *schemaPool + AutoDetect bool + Validate bool + Draft Draft +} + +// NewSchemaLoader creates a new SchemaLoader +func NewSchemaLoader() *SchemaLoader { + + ps := &SchemaLoader{ + pool: &schemaPool{ + schemaPoolDocuments: make(map[string]*schemaPoolDocument), + }, + AutoDetect: true, + Validate: false, + Draft: Hybrid, + } + ps.pool.autoDetect = &ps.AutoDetect + + return ps +} + +func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { + + var ( + schema string + err error + ) + if sl.AutoDetect { + schema, _, err = parseSchemaURL(documentNode) + if err != nil { + return err + } + } + + // If no explicit "$schema" is used, use the default metaschema associated with the draft used + if schema == "" { + if sl.Draft == Hybrid { + return nil + } + schema = drafts.GetSchemaURL(sl.Draft) + } + + // Disable validation when loading the metaschema to prevent an infinite recursive loop + sl.Validate = false + + metaSchema, err := sl.Compile(NewReferenceLoader(schema)) + + if err != nil { + return err + } + + sl.Validate = true + + result := metaSchema.validateDocument(documentNode) + + if !result.Valid() { + var res bytes.Buffer + for _, err := range result.Errors() { + res.WriteString(err.String()) + res.WriteString("\n") + } + return errors.New(res.String()) + } + + return nil +} + +// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require +// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema +func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { + emptyRef, _ := gojsonreference.NewJsonReference("") + + for _, loader := range loaders { + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + // Directly use the Recursive function, so that it only gets added to the schema pool by $id + // and not by the ref of the document, as that is empty + if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { + return err + } + } + + return nil +} + +// AddSchema adds a schema under the provided URL to the schema cache +func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { + + ref, err := gojsonreference.NewJsonReference(url) + + if err != nil { + return err + } + + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + return sl.pool.parseReferences(doc, ref, true) +} + +// Compile loads and compiles a schema +func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { + + ref, err := rootSchema.JsonReference() + + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = sl.pool + d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = rootSchema.LoadJSON() + if err != nil { + return nil, err + } + // References need only be parsed if loading JSON directly + // as pool.GetDocument already does this for us if loading by
reference + err = sl.pool.parseReferences(doc, ref, true) + if err != nil { + return nil, err + } + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return nil, err + } + } + + draft := sl.Draft + if sl.AutoDetect { + _, detectedDraft, err := parseSchemaURL(doc) + if err != nil { + return nil, err + } + if detectedDraft != nil { + draft = *detectedDraft + } + } + + err = d.parse(doc, draft) + if err != nil { + return nil, err + } + + return &d, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 000000000..35b1cc630 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,215 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} + Draft *Draft +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + jsonLoaderFactory JSONLoaderFactory + autoDetect *bool +} + +func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { + + var ( + draft *Draft + err error + reference = ref.String() + ) + // Only the root document should be added to the schema pool if pooled is true + if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { + return fmt.Errorf("Reference already exists: \"%s\"", reference) + } + + if *p.autoDetect { + _, draft, err = parseSchemaURL(document) + if err != nil { + return err + } + } + + err = p.parseReferencesRecursive(document, ref, draft) + + if pooled { + p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} + } + + return err +} + +func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { + // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. + // For $ref references it takes into account the $id scope it is in and replaces + // the reference by the absolute resolved reference + + // When encountering errors it fails silently. Error handling is done when the schema + // is syntactically parsed and any error encountered here should also come up there. 
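A sketch of how `AddSchemas` and `Compile` fit together, assuming the library's standard `NewStringLoader`. The `$id` URL is made up and is never fetched: `AddSchemas` registers the helper schema in the pool under its `$id`, so the `$ref` in the main schema resolves locally:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	sl := gojsonschema.NewSchemaLoader()

	// Registered only by its "$id", as AddSchemas expects; the pool
	// indexes it so the "$ref" below resolves without any network call.
	err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
		"$id": "https://example.com/port.json",
		"type": "integer", "minimum": 1, "maximum": 65535
	}`))
	if err != nil {
		panic(err)
	}

	schema, err := sl.Compile(gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {"port": {"$ref": "https://example.com/port.json"}}
	}`))
	if err != nil {
		panic(err)
	}

	res, err := schema.Validate(gojsonschema.NewStringLoader(`{"port": 70000}`))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", res.Valid()) // false: 70000 exceeds the maximum
}
```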
+ switch m := document.(type) { + case []interface{}: + for _, v := range m { + p.parseReferencesRecursive(v, ref, draft) + } + case map[string]interface{}: + localRef := &ref + + keyID := KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) + if err == nil { + localRef, err = ref.Inherits(jsonReference) + if err == nil { + if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { + return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) + } + p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} + } + } + } + + if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) + if err == nil { + absoluteRef, err := localRef.Inherits(jsonReference) + if err == nil { + m[KEY_REF] = absoluteRef.String() + } + } + } + + for k, v := range m { + // const and enums should be interpreted literally, so ignore them + if k == KEY_CONST || k == KEY_ENUM { + continue + } + // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc + // Therefore don't treat it like a schema. + if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { + if child, ok := v.(map[string]interface{}); ok { + for _, v := range child { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } else { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } + return nil +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + var ( + spd *schemaPoolDocument + draft *Draft + ok bool + err error + ) + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + // Create a deep copy, so we can remove the fragment part later on without altering the original + refToURL, _ := gojsonreference.NewJsonReference(reference.String()) + + // First check if the given fragment is a location independent identifier + // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 + + if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + // If the given reference is not a location independent identifier, + // strip the fragment and look for a document with it's base URI + + refToURL.GetUrl().Fragment = "" + + if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { + document, _, err := reference.GetPointer().Get(cachedSpd.Document) + + if err != nil { + return nil, err + } + + if internalLogEnabled { + internalLog(" From pool") + } + + spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} + p.schemaPoolDocuments[reference.String()] = spd + + return spd, nil + } + + // It is not possible to load anything remotely that is not canonical... 
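The walker above deliberately descends one level into `properties`, `dependencies`, and `patternProperties` without treating the keys themselves as schema keywords, since those keys are data: a document may legitimately have a property named "$ref" or "$id". A small illustration through the public API, with made-up literals:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// A property literally named "$ref"; parseReferencesRecursive must
	// not rewrite it, because here it is a property name, not a reference.
	schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {
			"$ref": {"type": "string"}
		}
	}`))
	if err != nil {
		panic(err)
	}

	res, err := schema.Validate(gojsonschema.NewStringLoader(`{"$ref": "#/just/plain/data"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", res.Valid()) // true: "$ref" is ordinary data here
}
```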
+ if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference.String()}, + )) + } + + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() + + if err != nil { + return nil, err + } + + // add the whole document to the pool for potential re-use + p.parseReferences(document, refToURL, true) + + _, draft, _ = parseSchemaURL(document) + + // resolve the potential fragment and also cache it + document, _, err = reference.GetPointer().Get(document) + + if err != nil { + return nil, err + } + + return &schemaPoolDocument{Document: document, Draft: draft}, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 000000000..6e5e1b5cd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,68 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. +// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + if _, ok := p.documents[ref]; !ok { + p.documents[ref] = sch + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 000000000..36b447a29 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// Is the schema typed ? that is containing at least one type +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] + if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go new file mode 100644 index 000000000..ec779812c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -0,0 +1,149 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. 
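A type union is stored in `jsonSchemaType.types` as a list and rendered as `[string,null]` by `String()` in error messages. A quick illustrative check via the public API (literals are examples):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// "type" as a union, which jsonSchemaType keeps as a list of names.
	schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(`{
		"type": ["string", "null"]
	}`))
	if err != nil {
		panic(err)
	}

	for _, doc := range []string{`"hello"`, `null`, `42`} {
		res, err := schema.Validate(gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-8s valid=%v\n", doc, res.Valid()) // 42 fails: expected [string,null]
	}
}
```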
+// +// created 27-02-2013 + +package gojsonschema + +import ( + "github.com/xeipuuv/gojsonreference" + "math/big" + "regexp" +) + +// Constants +const ( + KEY_SCHEMA = "$schema" + KEY_ID = "id" + KEY_ID_NEW = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_PROPERTY_NAMES = "propertyNames" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_CONTAINS = "contains" + KEY_CONST = "const" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" + KEY_IF = "if" + KEY_THEN = "then" + KEY_ELSE = "else" +) + +type subSchema struct { + draft *Draft + + // basic subSchema meta properties + id *gojsonreference.JsonReference + title *string + description *string + + property string + + // Quick pass/fail for boolean schemas + pass *bool + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + + // hierarchy + parent *subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *big.Rat + maximum *big.Rat + exclusiveMaximum *big.Rat + minimum *big.Rat + exclusiveMinimum *big.Rat + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + propertyNames *subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + contains *subSchema + + additionalItems interface{} + + // validation : all + _const *string //const is a golang keyword + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema + _if *subSchema // if/else are golang keywords + _then *subSchema + _else *subSchema +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 000000000..0e6fd5173 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,62 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. +// +// created 28-02-2013 + +package gojsonschema + +// Type constants +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +// JSON_TYPES hosts the list of type that are supported in JSON +var JSON_TYPES []string + +// SCHEMA_TYPES hosts the list of type that are supported in schemas +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 000000000..a17d22e3b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,197 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" +) + +func isKind(what interface{}, kinds ...reflect.Kind) bool { + target := what + if isJSONNumber(what) { + // JSON Numbers are strings! + target = *mustBeNumber(what) + } + targetKind := reflect.ValueOf(target).Kind() + for _, kind := range kinds { + if targetKind == kind { + return true + } + } + return false +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
+func indexStringInSlice(s []string, what string) int { + for i := range s { + if s[i] == what { + return i + } + } + return -1 +} + +func marshalToJSONString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func marshalWithoutNumber(value interface{}) (*string, error) { + + // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber + // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 + // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber + // so that these differences in representation are removed + + jsonString, err := marshalToJSONString(value) + if err != nil { + return nil, err + } + + var document interface{} + + err = json.Unmarshal([]byte(*jsonString), &document) + if err != nil { + return nil, err + } + + return marshalToJSONString(document) +} + +func isJSONNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJSONInteger(what interface{}) (isInt bool) { + + jsonNumber := what.(json.Number) + + bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) + + return isValidNumber && bigFloat.IsInt() + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +func mustBeInteger(what interface{}) *int { + + if isJSONNumber(what) { + + number := what.(json.Number) + + isInt := checkJSONInteger(number) + + if isInt { + + int64Value, err := number.Int64() + if err != nil { + return nil + } + + int32Value := int(int64Value) + return &int32Value + } + + } + + return nil +} + +func mustBeNumber(what interface{}) *big.Rat { + + if isJSONNumber(what) { + number := what.(json.Number) + float64Value, success := new(big.Rat).SetString(string(number)) + if success { + return float64Value + } + } + + return nil + +} + +func convertDocumentNode(val interface{}) interface{} { + + if lval, ok := val.([]interface{}); ok { + + res := []interface{}{} + for _, v := range lval { + res = append(res, convertDocumentNode(v)) + } + + return res + + } + + if mval, ok := val.(map[interface{}]interface{}); ok { + + res := map[string]interface{}{} + + for k, v := range mval { + res[k.(string)] = convertDocumentNode(v) + } + + return res + + } + + return val +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go new file mode 100644 index 000000000..74091bca1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -0,0 +1,858 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
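The `marshalWithoutNumber` round-trip above is what makes numerically equal literals compare equal for `const` and `enum`: re-encoding without `UseNumber` collapses `1.00` and `1` to the same string. A small check of that behavior, as I read the code, with made-up literals:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Enum candidates and instance values are both normalized through
	// marshalWithoutNumber, so 1.00 should match the enum entry 1.
	schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(`{"enum": [1]}`))
	if err != nil {
		panic(err)
	}

	res, err := schema.Validate(gojsonschema.NewStringLoader(`1.00`))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", res.Valid()) // true: 1.00 normalizes to 1
}
```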
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Extends Schema and subSchema, implements the validation phase. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +// Validate loads and validates a JSON schema +func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { + // load schema + schema, err := NewSchema(ls) + if err != nil { + return nil, err + } + return schema.Validate(ld) +} + +// Validate loads and validates a JSON document +func (v *Schema) Validate(l JSONLoader) (*Result, error) { + root, err := l.LoadJSON() + if err != nil { + return nil, err + } + return v.validateDocument(root), nil +} + +func (v *Schema) validateDocument(root interface{}) *Result { + result := &Result{} + context := NewJsonContext(STRING_CONTEXT_ROOT, nil) + v.rootSchema.validateRecursive(v.rootSchema, root, result, context) + return result +} + +func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { + result := &Result{} + v.validateRecursive(v, document, result, context) + return result +} + +// Walker function to validate the json recursively against the subSchema +func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateRecursive %s", context.String()) + internalLog(" %v", currentNode) + } + + // Handle true/false schema as early as possible as all other fields will be nil + if currentSubSchema.pass != nil { + if !*currentSubSchema.pass { + result.addInternalError( + new(FalseError), + context, + currentNode, + ErrorDetails{}, + ) + } + return + } + + // Handle referenced schemas, returns directly when a $ref is found + if currentSubSchema.refSchema != nil { + v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) + return + } + + // Check for null value + if currentNode == nil { + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_NULL, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) + v.validateCommon(currentSubSchema, currentNode, result, context) + + } else { // Not a null value + + if isJSONNumber(currentNode) { + + value := currentNode.(json.Number) + + isInt := checkJSONInteger(value) + + validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) + + if currentSubSchema.types.IsTyped() && !validType { + + givenType := TYPE_INTEGER + if !isInt { + givenType = TYPE_NUMBER + } + + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": givenType, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := rValue.Kind() 
+ + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := NewJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values : string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// Different kinds of validation there, subSchema / common / array / object / string... 
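Because numbers arrive as `json.Number` and `checkJSONInteger` classifies them with `big.Rat`, a value like `2.0` satisfies `"type": "integer"` while `2.5` does not. A minimal check, assuming the standard `NewStringLoader` decodes with `UseNumber`:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(`{"type": "integer"}`))
	if err != nil {
		panic(err)
	}

	// big.Rat.IsInt decides integer-ness, so the textual form is irrelevant.
	for _, doc := range []string{`2`, `2.0`, `2.5`} {
		res, err := schema.Validate(gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-4s valid=%v\n", doc, res.Valid()) // 2 and 2.0 pass, 2.5 fails
	}
}
```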
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addInternalError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + } + } + } + } + } + + if currentSubSchema._if != nil { + validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) + if currentSubSchema._then != nil && validationResultIf.Valid() { + validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) + if !validationResultThen.Valid() { + 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultThen) + } + } + if currentSubSchema._else != nil && !validationResultIf.Valid() { + validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) + if !validationResultElse.Valid() { + result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultElse) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // const: + if currentSubSchema._const != nil { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if *vString != *currentSubSchema._const { + result.addInternalError(new(ConstError), + context, + value, + ErrorDetails{ + "allowed": *currentSubSchema._const, + }, + ) + } + } + + // enum: + if len(currentSubSchema.enum) > 0 { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !isStringInSlice(currentSubSchema.enum, *vString) { + result.addInternalError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // "items" held a single schema: every element of the array must validate against it + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have fewer schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified.
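The branch above distinguishes the single-schema form of `items` from the tuple form; when values outnumber tuple schemas, `additionalItems` decides what happens to the surplus. An illustrative run of the tuple case with `"additionalItems": false` (literals are examples):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Tuple-form "items" plus "additionalItems": false, the case the
	// nbItems < nbValues branch guards against.
	schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(`{
		"items": [{"type": "string"}, {"type": "integer"}],
		"additionalItems": false
	}`))
	if err != nil {
		panic(err)
	}

	res, err := schema.Validate(gojsonschema.NewStringLoader(`["a", 1, true]`))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", res.Valid()) // false: the third element is not allowed
}
```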
+ + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addInternalError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addInternalError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems = make(map[string]int) + for j, v := range value { + vString, err := marshalWithoutNumber(v) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if i, ok := stringifiedItems[*vString]; ok { + result.addInternalError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, + ) + } + stringifiedItems[*vString] = j + } + } + + // contains: + + if currentSubSchema.contains != nil { + validatedOne := false + var bestValidationResult *Result + + for i, v := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + + validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) + if validationResult.Valid() { + validatedOne = true + break + } else { + if bestValidationResult == nil || validationResult.score > bestValidationResult.score { + bestValidationResult = validationResult + } + } + } + if !validatedOne { + result.addInternalError( + new(ArrayContainsError), + context, + value, + ErrorDetails{}, + ) + if bestValidationResult != nil { + result.mergeErrors(bestValidationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addInternalError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addInternalError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addInternalError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + for pk := range value { + + // Check whether this property is described by "properties" + found := false + for _, spValue := range 
currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + // Check whether this property is described by "patternProperties" + ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + // If it is described by neither "properties" nor "patternProperties", it must pass "additionalProperties" + if !found && !ppMatch { + switch ap := currentSubSchema.additionalProperties.(type) { + case bool: + // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema + if !ap { + result.addInternalError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + + } + case *subSchema: + validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) + result.mergeErrors(validationResult) + } + } + } + + // propertyNames: + if currentSubSchema.propertyNames != nil { + for pk := range value { + validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) + if !validationResult.Valid() { + result.addInternalError(new(InvalidPropertyNameError), + context, + value, ErrorDetails{ + "property": pk, + }) + result.mergeErrors(validationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + validated := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + validated = true + subContext := NewJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + } + } + + if !validated { + return false + } + + result.incrementScore() + return true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore JSON numbers + if isJSONNumber(value) { + return + } + + // Ignore non-strings + if !isKind(value, reflect.String) { + return + } + + if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addInternalError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addInternalError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addInternalError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema)
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore non numbers + if !isJSONNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := new(big.Rat).SetString(string(number)) + + // multipleOf: + if currentSubSchema.multipleOf != nil { + if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { + result.addInternalError( + new(MultipleOfError), + context, + number, + ErrorDetails{ + "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), + }, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if float64Value.Cmp(currentSubSchema.maximum) == 1 { + result.addInternalError( + new(NumberLTEError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.maximum), + }, + ) + } + } + if currentSubSchema.exclusiveMaximum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { + result.addInternalError( + new(NumberLTError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), + }, + ) + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if float64Value.Cmp(currentSubSchema.minimum) == -1 { + result.addInternalError( + new(NumberGTEError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.minimum), + }, + ) + } + } + if currentSubSchema.exclusiveMinimum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { + result.addInternalError( + new(NumberGTError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), + }, + ) + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 000000000..fc3116090 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 000000000..5577c0f93 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,304 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// ErrPasswordTooLong is returned when the password passed to +// GenerateFromPassword is too long (i.e. > 72 bytes). +var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +// GenerateFromPassword does not accept passwords longer than 72 bytes, which +// is the longest password bcrypt will operate on. 
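Typical use of this package, for orientation: hash once, then verify with `CompareHashAndPassword` (the password literal is, of course, an example):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash with the default cost; the salt is generated internally.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", hash) // e.g. $2a$10$... : version, cost, salt, hash

	// CompareHashAndPassword re-derives the hash from the stored salt and
	// cost, then compares in constant time.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		panic(err)
	}
	fmt.Println("password ok")
}
```

Note that raising the cost by one doubles the `1 << cost` `ExpandKey` rounds performed in `expensiveBlowfishSetup` below, so hashing and verification time grow exponentially with cost.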
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + if len(password) > 72 { + return nil, ErrPasswordTooLong + } + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. 
+ ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 000000000..9d80f1952 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. 
+ var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func 
decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 000000000..213bf204a --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. 
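As a reading aid for the cipher API defined next, a minimal sketch of single-block use (key and plaintext are made-up values; real callers need a proper cipher mode rather than raw block calls):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	c, err := blowfish.NewCipher([]byte("hypothetical key")) // 1 to 56 bytes
	if err != nil {
		panic(err)
	}
	src := []byte("8bytes!!") // exactly one BlockSize (8-byte) block
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	out := make([]byte, blowfish.BlockSize)
	c.Decrypt(out, dst)
	fmt.Printf("%s\n", out) // 8bytes!!
}
```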
+func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates and returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 000000000..d04077595 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt.
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 3a1674a1e..401414dde 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -264,36 +264,35 @@ func (s *String) ReadASN1Boolean(out *bool) bool { return true } -var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() - // ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does -// not point to an integer or to a big.Int, it panics. It reports whether the -// read was successful. +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. 
func (s *String) ReadASN1Integer(out interface{}) bool { - if reflect.TypeOf(out).Kind() != reflect.Ptr { - panic("out is not a pointer") - } - switch reflect.ValueOf(out).Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: var i int64 if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { return false } reflect.ValueOf(out).Elem().SetInt(i) return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + case *uint, *uint8, *uint16, *uint32, *uint64: var u uint64 if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { return false } reflect.ValueOf(out).Elem().SetUint(u) return true - case reflect.Struct: - if reflect.TypeOf(out).Elem() == bigIntType { - return s.readASN1BigInt(out.(*big.Int)) - } + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") } - panic("out does not point to an integer type") } func checkASN1Integer(bytes []byte) bool { @@ -333,6 +332,21 @@ func (s *String) readASN1BigInt(out *big.Int) bool { return true } +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + func (s *String) readASN1Int64(out *int64) bool { var bytes String if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { @@ -532,7 +546,7 @@ func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { return false } - paddingBits := uint8(bytes[0]) + paddingBits := bytes[0] bytes = bytes[1:] if paddingBits > 7 || len(bytes) == 0 && paddingBits != 0 || @@ -554,7 +568,7 @@ func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool { return false } - paddingBits := uint8(bytes[0]) + paddingBits := bytes[0] if paddingBits != 0 { return false } @@ -654,34 +668,27 @@ func (s *String) SkipOptionalASN1(tag asn1.Tag) bool { return s.ReadASN1(&unused, tag) } -// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER -// explicitly tagged with tag into out and advances. If no element with a -// matching tag is present, it writes defaultValue into out instead. If out -// does not point to an integer or to a big.Int, it panics. It reports -// whether the read was successful. +// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER explicitly +// tagged with tag into out and advances. If no element with a matching tag is +// present, it writes defaultValue into out instead. Otherwise, it behaves like +// ReadASN1Integer. 
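A sketch of the optional path, assuming the x/crypto cryptobyte/asn1 tag helpers (here the tagged element is absent, so the default is written):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	casn1 "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	var s cryptobyte.String // empty: no explicitly tagged INTEGER present
	var n int
	tag := casn1.Tag(0).Constructed().ContextSpecific()
	// The element is not present, so defaultValue (7) is stored in n.
	fmt.Println(s.ReadOptionalASN1Integer(&n, tag, 7), n) // true 7
}
```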
func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool { - if reflect.TypeOf(out).Kind() != reflect.Ptr { - panic("out is not a pointer") - } var present bool var i String if !s.ReadOptionalASN1(&i, &present, tag) { return false } if !present { - switch reflect.ValueOf(out).Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch out.(type) { + case *int, *int8, *int16, *int32, *int64, + *uint, *uint8, *uint16, *uint32, *uint64, *[]byte: reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue)) - case reflect.Struct: - if reflect.TypeOf(out).Elem() != bigIntType { - panic("invalid integer type") - } - if reflect.TypeOf(defaultValue).Kind() != reflect.Ptr || - reflect.TypeOf(defaultValue).Elem() != bigIntType { + case *big.Int: + if defaultValue, ok := defaultValue.(*big.Int); ok { + out.(*big.Int).Set(defaultValue) + } else { panic("out points to big.Int, but defaultValue does not") } - out.(*big.Int).Set(defaultValue.(*big.Int)) default: panic("invalid integer type") } diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 000000000..904b57e01 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go index 4c96147c8..3fd05b275 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -5,6 +5,8 @@ // Package salsa provides low-level access to functions in the Salsa family. package salsa // import "golang.org/x/crypto/salsa20/salsa" +import "math/bits" + // Sigma is the Salsa20 constant for 256-bit keys. var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} @@ -31,76 +33,76 @@ func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { for i := 0; i < 20; i += 2 { u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) + x4 ^= bits.RotateLeft32(u, 7) u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) + x8 ^= bits.RotateLeft32(u, 9) u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) + x12 ^= bits.RotateLeft32(u, 13) u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) + x0 ^= bits.RotateLeft32(u, 18) u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) + x9 ^= bits.RotateLeft32(u, 7) u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) + x13 ^= bits.RotateLeft32(u, 9) u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) + x1 ^= bits.RotateLeft32(u, 13) u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) + x5 ^= bits.RotateLeft32(u, 18) u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) + x14 ^= bits.RotateLeft32(u, 7) u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) + x2 ^= bits.RotateLeft32(u, 9) u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) + x6 ^= bits.RotateLeft32(u, 13) u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) + x10 ^= bits.RotateLeft32(u, 18) u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) + x3 ^= bits.RotateLeft32(u, 7) u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) + x7 ^= bits.RotateLeft32(u, 9) u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) + x11 ^= bits.RotateLeft32(u, 13) u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) + x15 ^= bits.RotateLeft32(u, 18) u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) + x1 ^= bits.RotateLeft32(u, 7) u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) + x2 ^= bits.RotateLeft32(u, 9) u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) + x3 ^= bits.RotateLeft32(u, 13) u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) + x0 ^= bits.RotateLeft32(u, 18) u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) + x6 ^= bits.RotateLeft32(u, 7) u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) + x7 ^= bits.RotateLeft32(u, 9) u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) + x4 ^= bits.RotateLeft32(u, 13) u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) + x5 ^= bits.RotateLeft32(u, 18) u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) + x11 ^= bits.RotateLeft32(u, 7) u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) + x8 ^= bits.RotateLeft32(u, 9) u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) + x9 ^= bits.RotateLeft32(u, 13) u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) + x10 ^= bits.RotateLeft32(u, 18) u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) + x12 ^= bits.RotateLeft32(u, 7) u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) + x13 ^= bits.RotateLeft32(u, 9) u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) + x14 ^= bits.RotateLeft32(u, 13) u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) + x15 ^= bits.RotateLeft32(u, 18) } out[0] = byte(x0) out[1] = byte(x0 >> 8) diff --git 
a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go index 9bfc0927c..7ec7bb39b 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -4,6 +4,8 @@ package salsa +import "math/bits" + // Core208 applies the Salsa20/8 core function to the 64-byte array in and puts // the result into the 64-byte array out. The input and output may be the same array. func Core208(out *[64]byte, in *[64]byte) { @@ -29,76 +31,76 @@ func Core208(out *[64]byte, in *[64]byte) { for i := 0; i < 8; i += 2 { u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) + x4 ^= bits.RotateLeft32(u, 7) u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) + x8 ^= bits.RotateLeft32(u, 9) u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) + x12 ^= bits.RotateLeft32(u, 13) u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) + x0 ^= bits.RotateLeft32(u, 18) u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) + x9 ^= bits.RotateLeft32(u, 7) u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) + x13 ^= bits.RotateLeft32(u, 9) u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) + x1 ^= bits.RotateLeft32(u, 13) u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) + x5 ^= bits.RotateLeft32(u, 18) u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) + x14 ^= bits.RotateLeft32(u, 7) u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) + x2 ^= bits.RotateLeft32(u, 9) u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) + x6 ^= bits.RotateLeft32(u, 13) u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) + x10 ^= bits.RotateLeft32(u, 18) u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) + x3 ^= bits.RotateLeft32(u, 7) u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) + x7 ^= bits.RotateLeft32(u, 9) u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) + x11 ^= bits.RotateLeft32(u, 13) u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) + x15 ^= bits.RotateLeft32(u, 18) u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) + x1 ^= bits.RotateLeft32(u, 7) u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) + x2 ^= bits.RotateLeft32(u, 9) u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) + x3 ^= bits.RotateLeft32(u, 13) u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) + x0 ^= bits.RotateLeft32(u, 18) u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) + x6 ^= bits.RotateLeft32(u, 7) u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) + x7 ^= bits.RotateLeft32(u, 9) u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) + x4 ^= bits.RotateLeft32(u, 13) u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) + x5 ^= bits.RotateLeft32(u, 18) u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) + x11 ^= bits.RotateLeft32(u, 7) u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) + x8 ^= bits.RotateLeft32(u, 9) u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) + x9 ^= bits.RotateLeft32(u, 13) u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) + x10 ^= bits.RotateLeft32(u, 18) u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) + x12 ^= bits.RotateLeft32(u, 7) u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) + x13 ^= bits.RotateLeft32(u, 9) u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) + x14 ^= bits.RotateLeft32(u, 13) u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) + x15 ^= bits.RotateLeft32(u, 18) } x0 += j0 x1 += j1 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go index 68169c6d6..e5cdb9a25 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -4,6 +4,8 @@ package salsa +import "math/bits" + const rounds = 20 // core applies the Salsa20 core function to 16-byte input in, 32-byte key k, @@ -31,76 +33,76 @@ func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { for i := 0; i < rounds; i += 2 { u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) + x4 ^= 
bits.RotateLeft32(u, 7) u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) + x8 ^= bits.RotateLeft32(u, 9) u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) + x12 ^= bits.RotateLeft32(u, 13) u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) + x0 ^= bits.RotateLeft32(u, 18) u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) + x9 ^= bits.RotateLeft32(u, 7) u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) + x13 ^= bits.RotateLeft32(u, 9) u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) + x1 ^= bits.RotateLeft32(u, 13) u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) + x5 ^= bits.RotateLeft32(u, 18) u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) + x14 ^= bits.RotateLeft32(u, 7) u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) + x2 ^= bits.RotateLeft32(u, 9) u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) + x6 ^= bits.RotateLeft32(u, 13) u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) + x10 ^= bits.RotateLeft32(u, 18) u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) + x3 ^= bits.RotateLeft32(u, 7) u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) + x7 ^= bits.RotateLeft32(u, 9) u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) + x11 ^= bits.RotateLeft32(u, 13) u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) + x15 ^= bits.RotateLeft32(u, 18) u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) + x1 ^= bits.RotateLeft32(u, 7) u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) + x2 ^= bits.RotateLeft32(u, 9) u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) + x3 ^= bits.RotateLeft32(u, 13) u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) + x0 ^= bits.RotateLeft32(u, 18) u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) + x6 ^= bits.RotateLeft32(u, 7) u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) + x7 ^= bits.RotateLeft32(u, 9) u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) + x4 ^= bits.RotateLeft32(u, 13) u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) + x5 ^= bits.RotateLeft32(u, 18) u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) + x11 ^= bits.RotateLeft32(u, 7) u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) + x8 ^= bits.RotateLeft32(u, 9) u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) + x9 ^= bits.RotateLeft32(u, 13) u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) + x10 ^= bits.RotateLeft32(u, 18) u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) + x12 ^= bits.RotateLeft32(u, 7) u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) + x13 ^= bits.RotateLeft32(u, 9) u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) + x14 ^= bits.RotateLeft32(u, 13) u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) + x15 ^= bits.RotateLeft32(u, 18) } x0 += j0 x1 += j1 diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 000000000..c971a99fa --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. 
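The mechanical change across the salsa files above swaps the hand-written rotate idiom for math/bits; a quick check of the equivalence (arbitrary test value):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	u := uint32(0x80000001)
	before := u<<7 | u>>(32-7)       // old hand-written rotation
	after := bits.RotateLeft32(u, 7) // new form used in this diff
	fmt.Println(before == after) // true
}
```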
+func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning 
+// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/helm.sh/helm/v3/LICENSE b/vendor/helm.sh/helm/v3/LICENSE new file mode 100644 index 000000000..21c57fae2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Kubernetes Authors All Rights Reserved + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/helm.sh/helm/v3/internal/ignore/doc.go b/vendor/helm.sh/helm/v3/internal/ignore/doc.go new file mode 100644 index 000000000..e6a6a6c7b --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/ignore/doc.go @@ -0,0 +1,67 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package ignore provides tools for writing ignore files (a la .gitignore). + +This provides both an ignore parser and a file-aware processor. 
+ +The format of ignore files closely follows, but does not exactly match, the +format for .gitignore files (https://git-scm.com/docs/gitignore). + +The formatting rules are as follows: + + - Parsing is line-by-line + - Empty lines are ignored + - Lines the begin with # (comments) will be ignored + - Leading and trailing spaces are always ignored + - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment) + - There is no support for multi-line patterns + - Shell glob patterns are supported. See Go's "path/filepath".Match + - If a pattern begins with a leading !, the match will be negated. + - If a pattern begins with a leading /, only paths relatively rooted will match. + - If the pattern ends with a trailing /, only directories will match + - If a pattern contains no slashes, file basenames are tested (not paths) + - The pattern sequence "**", while legal in a glob, will cause an error here + (to indicate incompatibility with .gitignore). + +Example: + + # Match any file named foo.txt + foo.txt + + # Match any text file + *.txt + + # Match only directories named mydir + mydir/ + + # Match only text files in the top-level directory + /*.txt + + # Match only the file foo.txt in the top-level directory + /foo.txt + + # Match any file named ab.txt, ac.txt, or ad.txt + a[b-d].txt + +Notable differences from .gitignore: + - The '**' syntax is not supported. + - The globbing library is Go's 'filepath.Match', not fnmatch(3) + - Trailing spaces are always ignored (there is no supported escape sequence) + - The evaluation of escape sequences has not been tested for compatibility + - There is no support for '\!' as a special leading sequence. +*/ +package ignore // import "helm.sh/helm/v3/internal/ignore" diff --git a/vendor/helm.sh/helm/v3/internal/ignore/rules.go b/vendor/helm.sh/helm/v3/internal/ignore/rules.go new file mode 100644 index 000000000..a80923baf --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/ignore/rules.go @@ -0,0 +1,228 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ignore + +import ( + "bufio" + "bytes" + "io" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + +// HelmIgnore default name of an ignorefile. +const HelmIgnore = ".helmignore" + +// Rules is a collection of path matching rules. +// +// Parse() and ParseFile() will construct and populate new Rules. +// Empty() will create an immutable empty ruleset. +type Rules struct { + patterns []*pattern +} + +// Empty builds an empty ruleset. +func Empty() *Rules { + return &Rules{patterns: []*pattern{}} +} + +// AddDefaults adds default ignore patterns. +// +// Ignore all dotfiles in "templates/" +func (r *Rules) AddDefaults() { + r.parseRule(`templates/.?*`) +} + +// ParseFile parses a helmignore file and returns the *Rules. 
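The trailing-slash and basename rules described above are easiest to pin down with a small test. Below is a minimal sketch, written as an in-package test (the test name is hypothetical) because internal/ignore cannot be imported from outside the helm module; it assumes nothing beyond a temp directory.

package ignore

import (
	"os"
	"path/filepath"
	"strings"
	"testing"
)

// Illustrative test of the documented semantics: "*.txt" matches by
// basename, while "mydir/" matches directories only.
func TestIgnoreSemanticsSketch(t *testing.T) {
	rules, err := Parse(strings.NewReader("*.txt\nmydir/\n"))
	if err != nil {
		t.Fatal(err)
	}

	dir := t.TempDir()
	file := filepath.Join(dir, "notes.txt")
	if err := os.WriteFile(file, []byte("x"), 0o644); err != nil {
		t.Fatal(err)
	}
	fi, _ := os.Stat(file) // a regular file
	di, _ := os.Stat(dir)  // a directory

	if !rules.Ignore("notes.txt", fi) {
		t.Error(`"*.txt" should match the basename notes.txt`)
	}
	if !rules.Ignore("mydir", di) {
		t.Error(`"mydir/" should match a directory named mydir`)
	}
	if rules.Ignore("mydir", fi) {
		t.Error(`"mydir/" must not match a regular file named mydir`)
	}
}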
+func ParseFile(file string) (*Rules, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + return Parse(f) +} + +// Parse parses a rules file +func Parse(file io.Reader) (*Rules, error) { + r := &Rules{patterns: []*pattern{}} + + s := bufio.NewScanner(file) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for s.Scan() { + scannedBytes := s.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + line := string(scannedBytes) + currentLine++ + + if err := r.parseRule(line); err != nil { + return r, err + } + } + return r, s.Err() +} + +// Ignore evaluates the file at the given path, and returns true if it should be ignored. +// +// Ignore evaluates path against the rules in order. Evaluation stops when a match +// is found. Matching a negative rule will stop evaluation. +func (r *Rules) Ignore(path string, fi os.FileInfo) bool { + // Don't match on empty dirs. + if path == "" { + return false + } + + // Disallow ignoring the current working directory. + // See issue: + // 1776 (New York City) Hamilton: "Pardon me, are you Aaron Burr, sir?" + if path == "." || path == "./" { + return false + } + for _, p := range r.patterns { + if p.match == nil { + log.Printf("ignore: no matcher supplied for %q", p.raw) + return false + } + + // For negative rules, we need to capture and return non-matches, + // and continue for matches. + if p.negate { + if p.mustDir && !fi.IsDir() { + return true + } + if !p.match(path, fi) { + return true + } + continue + } + + // If the rule is looking for directories, and this is not a directory, + // skip it. + if p.mustDir && !fi.IsDir() { + continue + } + if p.match(path, fi) { + return true + } + } + return false +} + +// parseRule parses a rule string and creates a pattern, which is then stored in the Rules object. +func (r *Rules) parseRule(rule string) error { + rule = strings.TrimSpace(rule) + + // Ignore blank lines + if rule == "" { + return nil + } + // Comment + if strings.HasPrefix(rule, "#") { + return nil + } + + // Fail any rules that contain ** + if strings.Contains(rule, "**") { + return errors.New("double-star (**) syntax is not supported") + } + + // Fail any patterns that can't compile. A non-empty string must be + // given to Match() to avoid optimization that skips rule evaluation. + if _, err := filepath.Match(rule, "abc"); err != nil { + return err + } + + p := &pattern{raw: rule} + + // Negation is handled at a higher level, so strip the leading ! from the + // string. + if strings.HasPrefix(rule, "!") { + p.negate = true + rule = rule[1:] + } + + // Directory verification is handled by a higher level, so the trailing / + // is removed from the rule. That way, a directory named "foo" matches, + // even if the supplied string does not contain a literal slash character. + if strings.HasSuffix(rule, "/") { + p.mustDir = true + rule = strings.TrimSuffix(rule, "/") + } + + if strings.HasPrefix(rule, "/") { + // Require path matches the root path. + p.match = func(n string, fi os.FileInfo) bool { + rule = strings.TrimPrefix(rule, "/") + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } else if strings.Contains(rule, "/") { + // require structural match. 
+ p.match = func(n string, fi os.FileInfo) bool { + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } else { + p.match = func(n string, fi os.FileInfo) bool { + // When there is no slash in the pattern, we evaluate ONLY the + // filename. + n = filepath.Base(n) + ok, err := filepath.Match(rule, n) + if err != nil { + log.Printf("Failed to compile %q: %s", rule, err) + return false + } + return ok + } + } + + r.patterns = append(r.patterns, p) + return nil +} + +// matcher is a function capable of computing a match. +// +// It returns true if the rule matches. +type matcher func(name string, fi os.FileInfo) bool + +// pattern describes a pattern to be matched in a rule set. +type pattern struct { + // raw is the unparsed string, with nothing stripped. + raw string + // match is the matcher function. + match matcher + // negate indicates that the rule's outcome should be negated. + negate bool + // mustDir indicates that the matched file must be a directory. + mustDir bool +} diff --git a/vendor/helm.sh/helm/v3/internal/sympath/walk.go b/vendor/helm.sh/helm/v3/internal/sympath/walk.go new file mode 100644 index 000000000..a276cfeff --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/sympath/walk.go @@ -0,0 +1,119 @@ +/* +Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are +provided under the BSD license. + +https://github.com/golang/go/blob/master/LICENSE + +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sympath + +import ( + "log" + "os" + "path/filepath" + "sort" + + "github.com/pkg/errors" +) + +// Walk walks the file tree rooted at root, calling walkFn for each file or directory +// in the tree, including root. All errors that arise visiting files and directories +// are filtered by walkFn. The files are walked in lexical order, which makes the +// output deterministic but means that for very large directories Walk can be +// inefficient. Walk follows symbolic links. +func Walk(root string, walkFn filepath.WalkFunc) error { + info, err := os.Lstat(root) + if err != nil { + err = walkFn(root, nil, err) + } else { + err = symwalk(root, info, walkFn) + } + if err == filepath.SkipDir { + return nil + } + return err +} + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +func readDirNames(dirname string) ([]string, error) { + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// symwalk recursively descends path, calling walkFn. +func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // Recursively walk symlinked directories. 
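Because Walk follows symbolic links (unlike filepath.Walk), a linked directory is traversed again under its link path. A rough in-package sketch of that behavior, assuming a temp directory layout and skipping where symlink creation is not permitted:

package sympath

import (
	"os"
	"path/filepath"
	"testing"
)

// Hypothetical test: "link" points at "real", so real/a.txt is visited
// twice, once under each path.
func TestWalkFollowsSymlinksSketch(t *testing.T) {
	dir := t.TempDir()
	real := filepath.Join(dir, "real")
	if err := os.Mkdir(real, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(real, "a.txt"), []byte("x"), 0o644); err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink(real, filepath.Join(dir, "link")); err != nil {
		t.Skip("symlinks not supported in this environment")
	}

	var files []string
	err := Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !fi.IsDir() {
			files = append(files, path)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != 2 { // dir/link/a.txt and dir/real/a.txt
		t.Errorf("expected 2 file visits, got %v", files)
	}
}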
+ if IsSymlink(info) { + resolved, err := filepath.EvalSymlinks(path) + if err != nil { + return errors.Wrapf(err, "error evaluating symlink %s", path) + } + log.Printf("found symbolic link in path: %s resolves to %s. Contents of linked file included and used", path, resolved) + if info, err = os.Lstat(resolved); err != nil { + return err + } + if err := symwalk(path, info, walkFn); err != nil && err != filepath.SkipDir { + return err + } + return nil + } + + if err := walkFn(path, info, nil); err != nil { + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := os.Lstat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = symwalk(filename, fileInfo, walkFn) + if err != nil { + if (!fileInfo.IsDir() && !IsSymlink(fileInfo)) || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// IsSymlink is used to determine if the fileinfo is a symbolic link. +func IsSymlink(fi os.FileInfo) bool { + return fi.Mode()&os.ModeSymlink != 0 +} diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go new file mode 100644 index 000000000..3cfcfef92 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/version/version.go @@ -0,0 +1,81 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version // import "helm.sh/helm/v3/internal/version" + +import ( + "flag" + "runtime" + "strings" +) + +var ( + // version is the current version of Helm. + // Update this whenever making a new release. + // The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata] + // + // Increment major number for new feature additions and behavioral changes. + // Increment minor number for bug fixes and performance enhancements. + version = "v3.11" + + // metadata is extra build time data + metadata = "" + // gitCommit is the git sha1 + gitCommit = "" + // gitTreeState is the state of the git tree + gitTreeState = "" +) + +// BuildInfo describes the compile time information. +type BuildInfo struct { + // Version is the current semver. + Version string `json:"version,omitempty"` + // GitCommit is the git sha1. + GitCommit string `json:"git_commit,omitempty"` + // GitTreeState is the state of the git tree. + GitTreeState string `json:"git_tree_state,omitempty"` + // GoVersion is the version of the Go compiler used. 
+ GoVersion string `json:"go_version,omitempty"` +} + +// GetVersion returns the semver string of the version +func GetVersion() string { + if metadata == "" { + return version + } + return version + "+" + metadata +} + +// GetUserAgent returns a user agent for user with an HTTP client +func GetUserAgent() string { + return "Helm/" + strings.TrimPrefix(GetVersion(), "v") +} + +// Get returns build info +func Get() BuildInfo { + v := BuildInfo{ + Version: GetVersion(), + GitCommit: gitCommit, + GitTreeState: gitTreeState, + GoVersion: runtime.Version(), + } + + // HACK(bacongobbler): strip out GoVersion during a test run for consistent test output + if flag.Lookup("test.v") != nil { + v.GoVersion = "" + } + return v +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/chart.go b/vendor/helm.sh/helm/v3/pkg/chart/chart.go new file mode 100644 index 000000000..a3bed63a3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/chart.go @@ -0,0 +1,173 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "path/filepath" + "regexp" + "strings" +) + +// APIVersionV1 is the API version number for version 1. +const APIVersionV1 = "v1" + +// APIVersionV2 is the API version number for version 2. +const APIVersionV2 = "v2" + +// aliasNameFormat defines the characters that are legal in an alias name. +var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$") + +// Chart is a helm package that contains metadata, a default config, zero or more +// optionally parameterizable templates, and zero or more charts (dependencies). +type Chart struct { + // Raw contains the raw contents of the files originally contained in the chart archive. + // + // This should not be used except in special cases like `helm show values`, + // where we want to display the raw values, comments and all. + Raw []*File `json:"-"` + // Metadata is the contents of the Chartfile. + Metadata *Metadata `json:"metadata"` + // Lock is the contents of Chart.lock. + Lock *Lock `json:"lock"` + // Templates for this chart. + Templates []*File `json:"templates"` + // Values are default config for this chart. + Values map[string]interface{} `json:"values"` + // Schema is an optional JSON schema for imposing structure on Values + Schema []byte `json:"schema"` + // Files are miscellaneous files in a chart archive, + // e.g. README, LICENSE, etc. + Files []*File `json:"files"` + + parent *Chart + dependencies []*Chart +} + +type CRD struct { + // Name is the File.Name for the crd file + Name string + // Filename is the File obj Name including (sub-)chart.ChartFullPath + Filename string + // File is the File obj for the crd + File *File +} + +// SetDependencies replaces the chart dependencies. +func (ch *Chart) SetDependencies(charts ...*Chart) { + ch.dependencies = nil + ch.AddDependency(charts...) +} + +// Name returns the name of the chart. +func (ch *Chart) Name() string { + if ch.Metadata == nil { + return "" + } + return ch.Metadata.Name +} + +// AddDependency determines if the chart is a subchart. 
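The parent pointer set by AddDependency is what drives the Root, ChartPath, and ChartFullPath helpers defined just below. A small sketch against the public helm.sh/helm/v3/pkg/chart package:

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chart"
)

func main() {
	app := &chart.Chart{Metadata: &chart.Metadata{Name: "app"}}
	db := &chart.Chart{Metadata: &chart.Metadata{Name: "db"}}
	app.SetDependencies(db) // wires db.parent = app

	fmt.Println(db.IsRoot())        // false
	fmt.Println(db.Root().Name())   // app
	fmt.Println(db.ChartPath())     // app.db
	fmt.Println(db.ChartFullPath()) // app/charts/db
}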
+func (ch *Chart) AddDependency(charts ...*Chart) { + for i, x := range charts { + charts[i].parent = ch + ch.dependencies = append(ch.dependencies, x) + } +} + +// Root finds the root chart. +func (ch *Chart) Root() *Chart { + if ch.IsRoot() { + return ch + } + return ch.Parent().Root() +} + +// Dependencies are the charts that this chart depends on. +func (ch *Chart) Dependencies() []*Chart { return ch.dependencies } + +// IsRoot determines if the chart is the root chart. +func (ch *Chart) IsRoot() bool { return ch.parent == nil } + +// Parent returns a subchart's parent chart. +func (ch *Chart) Parent() *Chart { return ch.parent } + +// ChartPath returns the full path to this chart in dot notation. +func (ch *Chart) ChartPath() string { + if !ch.IsRoot() { + return ch.Parent().ChartPath() + "." + ch.Name() + } + return ch.Name() +} + +// ChartFullPath returns the full path to this chart. +func (ch *Chart) ChartFullPath() string { + if !ch.IsRoot() { + return ch.Parent().ChartFullPath() + "/charts/" + ch.Name() + } + return ch.Name() +} + +// Validate validates the metadata. +func (ch *Chart) Validate() error { + return ch.Metadata.Validate() +} + +// AppVersion returns the appversion of the chart. +func (ch *Chart) AppVersion() string { + if ch.Metadata == nil { + return "" + } + return ch.Metadata.AppVersion +} + +// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart. +// Deprecated: use CRDObjects() +func (ch *Chart) CRDs() []*File { + files := []*File{} + // Find all resources in the crds/ directory + for _, f := range ch.Files { + if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { + files = append(files, f) + } + } + // Get CRDs from dependencies, too. + for _, dep := range ch.Dependencies() { + files = append(files, dep.CRDs()...) + } + return files +} + +// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts +func (ch *Chart) CRDObjects() []CRD { + crds := []CRD{} + // Find all resources in the crds/ directory + for _, f := range ch.Files { + if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { + mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f} + crds = append(crds, mycrd) + } + } + // Get CRDs from dependencies, too. + for _, dep := range ch.Dependencies() { + crds = append(crds, dep.CRDObjects()...) + } + return crds +} + +func hasManifestExtension(fname string) bool { + ext := filepath.Ext(fname) + return strings.EqualFold(ext, ".yaml") || strings.EqualFold(ext, ".yml") || strings.EqualFold(ext, ".json") +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/dependency.go b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go new file mode 100644 index 000000000..4ef5eeb32 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go @@ -0,0 +1,82 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import "time" + +// Dependency describes a chart upon which another chart depends. 
+// +// Dependencies can be used to express developer intent, or to capture the state +// of a chart. +type Dependency struct { + // Name is the name of the dependency. + // + // This must mach the name in the dependency's Chart.yaml. + Name string `json:"name"` + // Version is the version (range) of this chart. + // + // A lock file will always produce a single version, while a dependency + // may contain a semantic version range. + Version string `json:"version,omitempty"` + // The URL to the repository. + // + // Appending `index.yaml` to this string should result in a URL that can be + // used to fetch the repository index. + Repository string `json:"repository"` + // A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled ) + Condition string `json:"condition,omitempty"` + // Tags can be used to group charts for enabling/disabling together + Tags []string `json:"tags,omitempty"` + // Enabled bool determines if chart should be loaded + Enabled bool `json:"enabled,omitempty"` + // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a + // string or pair of child/parent sublist items. + ImportValues []interface{} `json:"import-values,omitempty"` + // Alias usable alias to be used for the chart + Alias string `json:"alias,omitempty"` +} + +// Validate checks for common problems with the dependency datastructure in +// the chart. This check must be done at load time before the dependency's charts are +// loaded. +func (d *Dependency) Validate() error { + if d == nil { + return ValidationError("dependencies must not contain empty or null nodes") + } + d.Name = sanitizeString(d.Name) + d.Version = sanitizeString(d.Version) + d.Repository = sanitizeString(d.Repository) + d.Condition = sanitizeString(d.Condition) + for i := range d.Tags { + d.Tags[i] = sanitizeString(d.Tags[i]) + } + if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) { + return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name) + } + return nil +} + +// Lock is a lock file for dependencies. +// +// It represents the state that the dependencies should be in. +type Lock struct { + // Generated is the date the lock file was last generated. + Generated time.Time `json:"generated"` + // Digest is a hash of the dependencies in Chart.yaml. + Digest string `json:"digest"` + // Dependencies is the list of dependencies that this lock file has locked. + Dependencies []*Dependency `json:"dependencies"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/errors.go b/vendor/helm.sh/helm/v3/pkg/chart/errors.go new file mode 100644 index 000000000..2fad5f370 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/errors.go @@ -0,0 +1,30 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import "fmt" + +// ValidationError represents a data validation error. 
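Since ValidationError (defined next) is a plain string type, these checks stay cheap and the messages compose well. For instance, an alias with disallowed characters surfaces like this (illustrative):

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chart"
)

func main() {
	// The space and '!' fall outside the aliasNameFormat pattern.
	d := &chart.Dependency{Name: "db", Alias: "bad alias!"}
	if err := d.Validate(); err != nil {
		fmt.Println(err)
		// validation: dependency "db" has disallowed characters in the alias
	}
}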
+type ValidationError string + +func (v ValidationError) Error() string { + return "validation: " + string(v) +} + +// ValidationErrorf takes a message and formatting options and creates a ValidationError +func ValidationErrorf(msg string, args ...interface{}) ValidationError { + return ValidationError(fmt.Sprintf(msg, args...)) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/file.go b/vendor/helm.sh/helm/v3/pkg/chart/file.go new file mode 100644 index 000000000..9dd7c08d5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/file.go @@ -0,0 +1,27 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +// File represents a file as a name/value pair. +// +// By convention, name is a relative path within the scope of the chart's +// base directory. +type File struct { + // Name is the path-like name of the template. + Name string `json:"name"` + // Data is the template as byte data. + Data []byte `json:"data"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go new file mode 100644 index 000000000..8b38cb89f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go @@ -0,0 +1,196 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "os" + "path" + "regexp" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" +) + +var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) + +// FileLoader loads a chart from a file +type FileLoader string + +// Load loads a chart +func (l FileLoader) Load() (*chart.Chart, error) { + return LoadFile(string(l)) +} + +// LoadFile loads from an archive file. +func LoadFile(name string) (*chart.Chart, error) { + if fi, err := os.Stat(name); err != nil { + return nil, err + } else if fi.IsDir() { + return nil, errors.New("cannot load a directory") + } + + raw, err := os.Open(name) + if err != nil { + return nil, err + } + defer raw.Close() + + err = ensureArchive(name, raw) + if err != nil { + return nil, err + } + + c, err := LoadArchive(raw) + if err != nil { + if err == gzip.ErrHeader { + return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err) + } + } + return c, err +} + +// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive. +// +// Sometimes users will provide a values.yaml for an argument where a chart is expected. 
One common occurrence +// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error +// if we didn't check for this. +func ensureArchive(name string, raw *os.File) error { + defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed. + + // Check the file format to give us a chance to provide the user with more actionable feedback. + buffer := make([]byte, 512) + _, err := raw.Read(buffer) + if err != nil && err != io.EOF { + return fmt.Errorf("file '%s' cannot be read: %s", name, err) + } + if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" { + // TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide + // variety of content (Makefile, .zshrc) as valid YAML without errors. + + // Wrong content type. Let's check if it's yaml and give an extra hint? + if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") { + return fmt.Errorf("file '%s' seems to be a YAML file, but expected a gzipped archive", name) + } + return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType) + } + return nil +} + +// LoadArchiveFiles reads in files out of an archive into memory. This function +// performs important path security checks and should always be used before +// expanding a tarball +func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { + unzipped, err := gzip.NewReader(in) + if err != nil { + return nil, err + } + defer unzipped.Close() + + files := []*BufferedFile{} + tr := tar.NewReader(unzipped) + for { + b := bytes.NewBuffer(nil) + hd, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + + if hd.FileInfo().IsDir() { + // Use this instead of hd.Typeflag because we don't have to do any + // inference chasing. + continue + } + + switch hd.Typeflag { + // We don't want to process these extension header files. + case tar.TypeXGlobalHeader, tar.TypeXHeader: + continue + } + + // Archive could contain \ if generated on Windows + delimiter := "/" + if strings.ContainsRune(hd.Name, '\\') { + delimiter = "\\" + } + + parts := strings.Split(hd.Name, delimiter) + n := strings.Join(parts[1:], delimiter) + + // Normalize the path to the / delimiter + n = strings.ReplaceAll(n, delimiter, "/") + + if path.IsAbs(n) { + return nil, errors.New("chart illegally contains absolute paths") + } + + n = path.Clean(n) + if n == "." { + // In this case, the original path was relative when it should have been absolute. + return nil, errors.Errorf("chart illegally contains content outside the base directory: %q", hd.Name) + } + if strings.HasPrefix(n, "..") { + return nil, errors.New("chart illegally references parent directory") + } + + // In some particularly arcane acts of path creativity, it is possible to intermix + // UNIX and Windows style paths in such a way that you produce a result of the form + // c:/foo even after all the built-in absolute path checks. So we explicitly check + // for this condition. 
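Those checks are straightforward to exercise: hand LoadArchiveFiles an in-memory .tgz whose entry name tries to climb out of the chart root, and it refuses to expand it. A minimal sketch (writer error handling elided for brevity):

package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"

	"helm.sh/helm/v3/pkg/chart/loader"
)

func main() {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gz)
	data := []byte("apiVersion: v2")
	// After the top-level directory is stripped, this entry becomes
	// "../../evil.yaml", which trips the parent-directory check.
	tw.WriteHeader(&tar.Header{
		Name:     "mychart/../../evil.yaml",
		Typeflag: tar.TypeReg,
		Mode:     0o644,
		Size:     int64(len(data)),
	})
	tw.Write(data)
	tw.Close()
	gz.Close()

	_, err := loader.LoadArchiveFiles(&buf)
	fmt.Println(err) // chart illegally references parent directory
}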
+ if drivePathPattern.MatchString(n) { + return nil, errors.New("chart contains illegally named files") + } + + if parts[0] == "Chart.yaml" { + return nil, errors.New("chart yaml not in base directory") + } + + if _, err := io.Copy(b, tr); err != nil { + return nil, err + } + + data := bytes.TrimPrefix(b.Bytes(), utf8bom) + + files = append(files, &BufferedFile{Name: n, Data: data}) + b.Reset() + } + + if len(files) == 0 { + return nil, errors.New("no files in chart archive") + } + return files, nil +} + +// LoadArchive loads from a reader containing a compressed tar archive. +func LoadArchive(in io.Reader) (*chart.Chart, error) { + files, err := LoadArchiveFiles(in) + if err != nil { + return nil, err + } + + return LoadFiles(files) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go new file mode 100644 index 000000000..bbe543870 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go @@ -0,0 +1,120 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/ignore" + "helm.sh/helm/v3/internal/sympath" + "helm.sh/helm/v3/pkg/chart" +) + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +// DirLoader loads a chart from a directory +type DirLoader string + +// Load loads the chart +func (l DirLoader) Load() (*chart.Chart, error) { + return LoadDir(string(l)) +} + +// LoadDir loads from a directory. +// +// This loads charts only from directories. +func LoadDir(dir string) (*chart.Chart, error) { + topdir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + + // Just used for errors. + c := &chart.Chart{} + + rules := ignore.Empty() + ifile := filepath.Join(topdir, ignore.HelmIgnore) + if _, err := os.Stat(ifile); err == nil { + r, err := ignore.ParseFile(ifile) + if err != nil { + return c, err + } + rules = r + } + rules.AddDefaults() + + files := []*BufferedFile{} + topdir += string(filepath.Separator) + + walk := func(name string, fi os.FileInfo, err error) error { + n := strings.TrimPrefix(name, topdir) + if n == "" { + // No need to process top level. Avoid bug with helmignore .* matching + // empty names. See issue 1779. + return nil + } + + // Normalize to / since it will also work on Windows + n = filepath.ToSlash(n) + + if err != nil { + return err + } + if fi.IsDir() { + // Directory-based ignore rules should involve skipping the entire + // contents of that directory. + if rules.Ignore(n, fi) { + return filepath.SkipDir + } + return nil + } + + // If a .helmignore file matches, skip this file. + if rules.Ignore(n, fi) { + return nil + } + + // Irregular files include devices, sockets, and other uses of files that + // are not regular files. In Go they have a file mode type bit set. + // See https://golang.org/pkg/os/#FileMode for examples. 
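In normal use this walk is reached through loader.Load (defined in load.go below), which dispatches to the directory loader when given a directory. A brief sketch, assuming ./mychart is a chart directory containing at least a Chart.yaml:

package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/chart/loader"
)

func main() {
	// Files matched by ./mychart/.helmignore, plus the default
	// "templates/.?*" rule, are skipped during the walk.
	c, err := loader.Load("./mychart")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name(), c.Metadata.Version)
}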
+ if !fi.Mode().IsRegular() { + return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name) + } + + data, err := ioutil.ReadFile(name) + if err != nil { + return errors.Wrapf(err, "error reading %s", n) + } + + data = bytes.TrimPrefix(data, utf8bom) + + files = append(files, &BufferedFile{Name: n, Data: data}) + return nil + } + if err = sympath.Walk(topdir, walk); err != nil { + return c, err + } + + return LoadFiles(files) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go new file mode 100644 index 000000000..7cc8878a8 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go @@ -0,0 +1,200 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "bytes" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// ChartLoader loads a chart. +type ChartLoader interface { + Load() (*chart.Chart, error) +} + +// Loader returns a new ChartLoader appropriate for the given chart name +func Loader(name string) (ChartLoader, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if fi.IsDir() { + return DirLoader(name), nil + } + return FileLoader(name), nil + +} + +// Load takes a string name, tries to resolve it to a file or directory, and then loads it. +// +// This is the preferred way to load a chart. It will discover the chart encoding +// and hand off to the appropriate chart reader. +// +// If a .helmignore file is present, the directory loader will skip loading any files +// matching it. But .helmignore is not evaluated when reading out of an archive. +func Load(name string) (*chart.Chart, error) { + l, err := Loader(name) + if err != nil { + return nil, err + } + return l.Load() +} + +// BufferedFile represents an archive file buffered for later processing. +type BufferedFile struct { + Name string + Data []byte +} + +// LoadFiles loads from in-memory files. +func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { + c := new(chart.Chart) + subcharts := make(map[string][]*BufferedFile) + + // do not rely on assumed ordering of files in the chart and crash + // if Chart.yaml was not coming early enough to initialize metadata + for _, f := range files { + c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data}) + if f.Name == "Chart.yaml" { + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { + return c, errors.Wrap(err, "cannot load Chart.yaml") + } + // NOTE(bacongobbler): while the chart specification says that APIVersion must be set, + // Helm 2 accepted charts that did not provide an APIVersion in their chart metadata. + // Because of that, if APIVersion is unset, we should assume we're loading a v1 chart. 
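That defaulting is observable through the exported LoadFiles: a Chart.yaml with no apiVersion loads as a v1 chart. A self-contained sketch:

package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/chart/loader"
)

func main() {
	files := []*loader.BufferedFile{
		// apiVersion deliberately omitted: LoadFiles fills in "v1".
		{Name: "Chart.yaml", Data: []byte("name: demo\nversion: 0.1.0")},
		{Name: "values.yaml", Data: []byte("replicas: 2")},
		{Name: "templates/cm.yaml", Data: []byte("kind: ConfigMap")},
	}
	c, err := loader.LoadFiles(files)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Metadata.APIVersion, c.Values["replicas"], len(c.Templates)) // v1 2 1
}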
+ if c.Metadata.APIVersion == "" { + c.Metadata.APIVersion = chart.APIVersionV1 + } + } + } + for _, f := range files { + switch { + case f.Name == "Chart.yaml": + // already processed + continue + case f.Name == "Chart.lock": + c.Lock = new(chart.Lock) + if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil { + return c, errors.Wrap(err, "cannot load Chart.lock") + } + case f.Name == "values.yaml": + c.Values = make(map[string]interface{}) + if err := yaml.Unmarshal(f.Data, &c.Values); err != nil { + return c, errors.Wrap(err, "cannot load values.yaml") + } + case f.Name == "values.schema.json": + c.Schema = f.Data + + // Deprecated: requirements.yaml is deprecated use Chart.yaml. + // We will handle it for you because we are nice people + case f.Name == "requirements.yaml": + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if c.Metadata.APIVersion != chart.APIVersionV1 { + log.Printf("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.") + } + if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { + return c, errors.Wrap(err, "cannot load requirements.yaml") + } + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + // Deprecated: requirements.lock is deprecated use Chart.lock. + case f.Name == "requirements.lock": + c.Lock = new(chart.Lock) + if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil { + return c, errors.Wrap(err, "cannot load requirements.lock") + } + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + + case strings.HasPrefix(f.Name, "templates/"): + c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data}) + case strings.HasPrefix(f.Name, "charts/"): + if filepath.Ext(f.Name) == ".prov" { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + continue + } + + fname := strings.TrimPrefix(f.Name, "charts/") + cname := strings.SplitN(fname, "/", 2)[0] + subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data}) + default: + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + } + + if c.Metadata == nil { + return c, errors.New("Chart.yaml file is missing") + } + + if err := c.Validate(); err != nil { + return c, err + } + + for n, files := range subcharts { + var sc *chart.Chart + var err error + switch { + case strings.IndexAny(n, "_.") == 0: + continue + case filepath.Ext(n) == ".tgz": + file := files[0] + if file.Name != n { + return c, errors.Errorf("error unpacking tar in %s: expected %s, got %s", c.Name(), n, file.Name) + } + // Untar the chart and add to c.Dependencies + sc, err = LoadArchive(bytes.NewBuffer(file.Data)) + default: + // We have to trim the prefix off of every file, and ignore any file + // that is in charts/, but isn't actually a chart. 
+ buff := make([]*BufferedFile, 0, len(files)) + for _, f := range files { + parts := strings.SplitN(f.Name, "/", 2) + if len(parts) < 2 { + continue + } + f.Name = parts[1] + buff = append(buff, f) + } + sc, err = LoadFiles(buff) + } + + if err != nil { + return c, errors.Wrapf(err, "error unpacking %s in %s", n, c.Name()) + } + c.AddDependency(sc) + } + + return c, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go new file mode 100644 index 000000000..ae572abb7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go @@ -0,0 +1,163 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "strings" + "unicode" + + "github.com/Masterminds/semver/v3" +) + +// Maintainer describes a Chart maintainer. +type Maintainer struct { + // Name is a user name or organization name + Name string `json:"name,omitempty"` + // Email is an optional email address to contact the named maintainer + Email string `json:"email,omitempty"` + // URL is an optional URL to an address for the named maintainer + URL string `json:"url,omitempty"` +} + +// Validate checks valid data and sanitizes string characters. +func (m *Maintainer) Validate() error { + if m == nil { + return ValidationError("maintainers must not contain empty or null nodes") + } + m.Name = sanitizeString(m.Name) + m.Email = sanitizeString(m.Email) + m.URL = sanitizeString(m.URL) + return nil +} + +// Metadata for a Chart file. This models the structure of a Chart.yaml file. +type Metadata struct { + // The name of the chart. Required. + Name string `json:"name,omitempty"` + // The URL to a relevant project page, git repo, or contact person + Home string `json:"home,omitempty"` + // Source is the URL to the source code of this chart + Sources []string `json:"sources,omitempty"` + // A SemVer 2 conformant version string of the chart. Required. + Version string `json:"version,omitempty"` + // A one-sentence description of the chart + Description string `json:"description,omitempty"` + // A list of string keywords + Keywords []string `json:"keywords,omitempty"` + // A list of name and URL/email address combinations for the maintainer(s) + Maintainers []*Maintainer `json:"maintainers,omitempty"` + // The URL to an icon file. + Icon string `json:"icon,omitempty"` + // The API Version of this chart. Required. + APIVersion string `json:"apiVersion,omitempty"` + // The condition to check to enable chart + Condition string `json:"condition,omitempty"` + // The tags to check to enable chart + Tags string `json:"tags,omitempty"` + // The version of the application enclosed inside of this chart. + AppVersion string `json:"appVersion,omitempty"` + // Whether or not this chart is deprecated + Deprecated bool `json:"deprecated,omitempty"` + // Annotations are additional mappings uninterpreted by Helm, + // made available for inspection by other applications. 
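Metadata.Validate (defined further down in this file) both sanitizes the string fields and enforces the required name/version/apiVersion fields, including the SemVer rule on Version. A quick illustration of a failing case:

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chart"
)

func main() {
	md := &chart.Metadata{Name: "demo", APIVersion: "v2", Version: "not-semver"}
	fmt.Println(md.Validate())
	// validation: chart.metadata.version "not-semver" is invalid
}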
+ Annotations map[string]string `json:"annotations,omitempty"` + // KubeVersion is a SemVer constraint specifying the version of Kubernetes required. + KubeVersion string `json:"kubeVersion,omitempty"` + // Dependencies are a list of dependencies for a chart. + Dependencies []*Dependency `json:"dependencies,omitempty"` + // Specifies the chart type: application or library + Type string `json:"type,omitempty"` +} + +// Validate checks the metadata for known issues and sanitizes string +// characters. +func (md *Metadata) Validate() error { + if md == nil { + return ValidationError("chart.metadata is required") + } + + md.Name = sanitizeString(md.Name) + md.Description = sanitizeString(md.Description) + md.Home = sanitizeString(md.Home) + md.Icon = sanitizeString(md.Icon) + md.Condition = sanitizeString(md.Condition) + md.Tags = sanitizeString(md.Tags) + md.AppVersion = sanitizeString(md.AppVersion) + md.KubeVersion = sanitizeString(md.KubeVersion) + for i := range md.Sources { + md.Sources[i] = sanitizeString(md.Sources[i]) + } + for i := range md.Keywords { + md.Keywords[i] = sanitizeString(md.Keywords[i]) + } + + if md.APIVersion == "" { + return ValidationError("chart.metadata.apiVersion is required") + } + if md.Name == "" { + return ValidationError("chart.metadata.name is required") + } + if md.Version == "" { + return ValidationError("chart.metadata.version is required") + } + if !isValidSemver(md.Version) { + return ValidationErrorf("chart.metadata.version %q is invalid", md.Version) + } + if !isValidChartType(md.Type) { + return ValidationError("chart.metadata.type must be application or library") + } + + for _, m := range md.Maintainers { + if err := m.Validate(); err != nil { + return err + } + } + + // Aliases need to be validated here to make sure that the alias name does + // not contain any illegal characters. + for _, dependency := range md.Dependencies { + if err := dependency.Validate(); err != nil { + return err + } + } + return nil +} + +func isValidChartType(in string) bool { + switch in { + case "", "application", "library": + return true + } + return false +} + +func isValidSemver(v string) bool { + _, err := semver.NewVersion(v) + return err == nil +} + +// sanitizeString normalize spaces and removes non-printable characters. +func sanitizeString(str string) string { + return strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + if unicode.IsPrint(r) { + return r + } + return -1 + }, str) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go new file mode 100644 index 000000000..5f57e11a5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go @@ -0,0 +1,126 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package chartutil + +import ( + "fmt" + "strconv" + + "github.com/Masterminds/semver/v3" + "k8s.io/client-go/kubernetes/scheme" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + helmversion "helm.sh/helm/v3/internal/version" +) + +var ( + // The Kubernetes version can be set by LDFLAGS. In order to do that the value + // must be a string. + k8sVersionMajor = "1" + k8sVersionMinor = "20" + + // DefaultVersionSet is the default version set, which includes only Core V1 ("v1"). + DefaultVersionSet = allKnownVersions() + + // DefaultCapabilities is the default set of capabilities. + DefaultCapabilities = &Capabilities{ + KubeVersion: KubeVersion{ + Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor), + Major: k8sVersionMajor, + Minor: k8sVersionMinor, + }, + APIVersions: DefaultVersionSet, + HelmVersion: helmversion.Get(), + } +) + +// Capabilities describes the capabilities of the Kubernetes cluster. +type Capabilities struct { + // KubeVersion is the Kubernetes version. + KubeVersion KubeVersion + // APIversions are supported Kubernetes API versions. + APIVersions VersionSet + // HelmVersion is the build information for this helm version + HelmVersion helmversion.BuildInfo +} + +func (capabilities *Capabilities) Copy() *Capabilities { + return &Capabilities{ + KubeVersion: capabilities.KubeVersion, + APIVersions: capabilities.APIVersions, + HelmVersion: capabilities.HelmVersion, + } +} + +// KubeVersion is the Kubernetes version. +type KubeVersion struct { + Version string // Kubernetes version + Major string // Kubernetes major version + Minor string // Kubernetes minor version +} + +// String implements fmt.Stringer +func (kv *KubeVersion) String() string { return kv.Version } + +// GitVersion returns the Kubernetes version string. +// +// Deprecated: use KubeVersion.Version. +func (kv *KubeVersion) GitVersion() string { return kv.Version } + +// ParseKubeVersion parses kubernetes version from string +func ParseKubeVersion(version string) (*KubeVersion, error) { + sv, err := semver.NewVersion(version) + if err != nil { + return nil, err + } + return &KubeVersion{ + Version: "v" + sv.String(), + Major: strconv.FormatUint(sv.Major(), 10), + Minor: strconv.FormatUint(sv.Minor(), 10), + }, nil +} + +// VersionSet is a set of Kubernetes API versions. +type VersionSet []string + +// Has returns true if the version string is in the set. +// +// vs.Has("apps/v1") +func (v VersionSet) Has(apiVersion string) bool { + for _, x := range v { + if x == apiVersion { + return true + } + } + return false +} + +func allKnownVersions() VersionSet { + // We should register the built in extension APIs as well so CRDs are + // supported in the default version set. This has caused problems with `helm + // template` in the past, so let's be safe + apiextensionsv1beta1.AddToScheme(scheme.Scheme) + apiextensionsv1.AddToScheme(scheme.Scheme) + + groups := scheme.Scheme.PrioritizedVersionsAllGroups() + vs := make(VersionSet, 0, len(groups)) + for _, gv := range groups { + vs = append(vs, gv.String()) + } + return vs +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go new file mode 100644 index 000000000..808a902b1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. 
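ParseKubeVersion and the VersionSet helpers above pair naturally with IsCompatibleRange from compatible.go further down. A brief sketch of the public chartutil API; the Has result assumes a stock client-go scheme, where the apps group is registered:

package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/chartutil"
)

func main() {
	kv, err := chartutil.ParseKubeVersion("1.25.4")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(kv.Version, kv.Major, kv.Minor) // v1.25.4 1 25

	fmt.Println(chartutil.IsCompatibleRange(">=1.20.0", kv.Version))      // true
	fmt.Println(chartutil.DefaultCapabilities.APIVersions.Has("apps/v1")) // true
}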
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// LoadChartfile loads a Chart.yaml file into a *chart.Metadata. +func LoadChartfile(filename string) (*chart.Metadata, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + y := new(chart.Metadata) + err = yaml.Unmarshal(b, y) + return y, err +} + +// SaveChartfile saves the given metadata as a Chart.yaml file at the given path. +// +// 'filename' should be the complete path and filename ('foo/Chart.yaml') +func SaveChartfile(filename string, cf *chart.Metadata) error { + // Pull out the dependencies of a v1 Chart, since there's no way + // to tell the serializer to skip a field for just this use case + savedDependencies := cf.Dependencies + if cf.APIVersion == chart.APIVersionV1 { + cf.Dependencies = nil + } + out, err := yaml.Marshal(cf) + if cf.APIVersion == chart.APIVersionV1 { + cf.Dependencies = savedDependencies + } + if err != nil { + return err + } + return ioutil.WriteFile(filename, out, 0644) +} + +// IsChartDir validate a chart directory. +// +// Checks for a valid Chart.yaml. +func IsChartDir(dirName string) (bool, error) { + if fi, err := os.Stat(dirName); err != nil { + return false, err + } else if !fi.IsDir() { + return false, errors.Errorf("%q is not a directory", dirName) + } + + chartYaml := filepath.Join(dirName, ChartfileName) + if _, err := os.Stat(chartYaml); os.IsNotExist(err) { + return false, errors.Errorf("no %s exists in directory %q", ChartfileName, dirName) + } + + chartYamlContent, err := ioutil.ReadFile(chartYaml) + if err != nil { + return false, errors.Errorf("cannot read %s in directory %q", ChartfileName, dirName) + } + + chartContent := new(chart.Metadata) + if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil { + return false, err + } + if chartContent == nil { + return false, errors.Errorf("chart metadata (%s) missing", ChartfileName) + } + if chartContent.Name == "" { + return false, errors.Errorf("invalid chart (%s): name must not be empty", ChartfileName) + } + + return true, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go new file mode 100644 index 000000000..f634d6425 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go @@ -0,0 +1,227 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package chartutil + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" +) + +func concatPrefix(a, b string) string { + if a == "" { + return b + } + return fmt.Sprintf("%s.%s", a, b) +} + +// CoalesceValues coalesces all of the values in a chart (and its subcharts). +// +// Values are coalesced together using the following rules: +// +// - Values in a higher level chart always override values in a lower-level +// dependency chart +// - Scalar values and arrays are replaced, maps are merged +// - A chart has access to all of the variables for it, as well as all of +// the values destined for its dependencies. +func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { + v, err := copystructure.Copy(vals) + if err != nil { + return vals, err + } + + valsCopy := v.(map[string]interface{}) + // if we have an empty map, make sure it is initialized + if valsCopy == nil { + valsCopy = make(map[string]interface{}) + } + return coalesce(log.Printf, chrt, valsCopy, "") +} + +type printFn func(format string, v ...interface{}) + +// coalesce coalesces the dest values and the chart values, giving priority to the dest values. +// +// This is a helper function for CoalesceValues. +func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string) (map[string]interface{}, error) { + coalesceValues(printf, ch, dest, prefix) + return coalesceDeps(printf, ch, dest, prefix) +} + +// coalesceDeps coalesces the dependencies of the given chart. +func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string) (map[string]interface{}, error) { + for _, subchart := range chrt.Dependencies() { + if c, ok := dest[subchart.Name()]; !ok { + // If dest doesn't already have the key, create it. + dest[subchart.Name()] = make(map[string]interface{}) + } else if !istable(c) { + return dest, errors.Errorf("type mismatch on %s: %t", subchart.Name(), c) + } + if dv, ok := dest[subchart.Name()]; ok { + dvmap := dv.(map[string]interface{}) + subPrefix := concatPrefix(prefix, chrt.Metadata.Name) + + // Get globals out of dest and merge them into dvmap. + coalesceGlobals(printf, dvmap, dest, subPrefix) + + // Now coalesce the rest of the values. + var err error + dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix) + if err != nil { + return dest, err + } + } + } + return dest, nil +} + +// coalesceGlobals copies the globals out of src and merges them into dest. +// +// For convenience, returns dest. +func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string) { + var dg, sg map[string]interface{} + + if destglob, ok := dest[GlobalKey]; !ok { + dg = make(map[string]interface{}) + } else if dg, ok = destglob.(map[string]interface{}); !ok { + printf("warning: skipping globals because destination %s is not a table.", GlobalKey) + return + } + + if srcglob, ok := src[GlobalKey]; !ok { + sg = make(map[string]interface{}) + } else if sg, ok = srcglob.(map[string]interface{}); !ok { + printf("warning: skipping globals because source %s is not a table.", GlobalKey) + return + } + + // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This + // reverses that decision. It may somehow be possible to introduce a loop + // here, but I haven't found a way. So for the time being, let's allow + // tables in globals. 
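The precedence rules listed above are visible through the exported CoalesceTables (defined near the end of this file): the destination wins on conflicts, nested maps merge, and an explicit nil in the destination deletes the key. A short sketch:

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chartutil"
)

func main() {
	dst := map[string]interface{}{
		"image": map[string]interface{}{"tag": "v2"},
		"debug": nil, // nil in dst removes the key entirely
	}
	src := map[string]interface{}{
		"image": map[string]interface{}{"tag": "v1", "pullPolicy": "IfNotPresent"},
		"debug": true,
		"port":  8080,
	}
	out := chartutil.CoalesceTables(dst, src)
	fmt.Println(out["image"], out["port"], out["debug"])
	// map[pullPolicy:IfNotPresent tag:v2] 8080 <nil>
}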
+ for key, val := range sg { + if istable(val) { + vv := copyMap(val.(map[string]interface{})) + if destv, ok := dg[key]; !ok { + // Here there is no merge. We're just adding. + dg[key] = vv + } else { + if destvmap, ok := destv.(map[string]interface{}); !ok { + printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key) + } else { + // Basically, we reverse order of coalesce here to merge + // top-down. + subPrefix := concatPrefix(prefix, key) + coalesceTablesFullKey(printf, vv, destvmap, subPrefix) + dg[key] = vv + } + } + } else if dv, ok := dg[key]; ok && istable(dv) { + // It's not clear if this condition can actually ever trigger. + printf("key %s is table. Skipping", key) + } else { + // TODO: Do we need to do any additional checking on the value? + dg[key] = val + } + } + dest[GlobalKey] = dg +} + +func copyMap(src map[string]interface{}) map[string]interface{} { + m := make(map[string]interface{}, len(src)) + for k, v := range src { + m[k] = v + } + return m +} + +// coalesceValues builds up a values map for a particular chart. +// +// Values in v will override the values in the chart. +func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string) { + subPrefix := concatPrefix(prefix, c.Metadata.Name) + for key, val := range c.Values { + if value, ok := v[key]; ok { + if value == nil { + // When the YAML value is null, we remove the value's key. + // This allows Helm's various sources of values (value files or --set) to + // remove incompatible keys from any previous chart, file, or set values. + delete(v, key) + } else if dest, ok := value.(map[string]interface{}); ok { + // if v[key] is a table, merge nv's val table into v[key]. + src, ok := val.(map[string]interface{}) + if !ok { + // If the original value is nil, there is nothing to coalesce, so we don't print + // the warning + if val != nil { + printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) + } + } else { + // Because v has higher precedence than nv, dest values override src + // values. + coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key)) + } + } + } else { + // If the key is not in v, copy it from nv. + v[key] = val + } + } +} + +// CoalesceTables merges a source map into a destination map. +// +// dest is considered authoritative. +func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} { + return coalesceTablesFullKey(log.Printf, dst, src, "") +} + +// coalesceTablesFullKey merges a source map into a destination map. +// +// dest is considered authoritative. +func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string) map[string]interface{} { + // When --reuse-values is set but there are no modifications yet, return new values + if src == nil { + return dst + } + if dst == nil { + return src + } + // Because dest has higher precedence than src, dest values override src + // values. + for key, val := range src { + fullkey := concatPrefix(prefix, key) + if dv, ok := dst[key]; ok && dv == nil { + delete(dst, key) + } else if !ok { + dst[key] = val + } else if istable(val) { + if istable(dv) { + coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey) + } else { + printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val) + } + } else if istable(dv) && val != nil { + printf("warning: destination for %s is a table. 
Ignoring non-table value (%v)", fullkey, val) + } + } + return dst +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go new file mode 100644 index 000000000..f4656c913 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go @@ -0,0 +1,34 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import "github.com/Masterminds/semver/v3" + +// IsCompatibleRange compares a version to a constraint. +// It returns true if the version matches the constraint, and false in all other cases. +func IsCompatibleRange(constraint, ver string) bool { + sv, err := semver.NewVersion(ver) + if err != nil { + return false + } + + c, err := semver.NewConstraint(constraint) + if err != nil { + return false + } + return c.Check(sv) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go new file mode 100644 index 000000000..3a8f3cc5a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go @@ -0,0 +1,687 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// chartName is a regular expression for testing the supplied name of a chart. +// This regular expression is probably stricter than it needs to be. We can relax it +// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be +// problematic. +var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$") + +const ( + // ChartfileName is the default Chart file name. + ChartfileName = "Chart.yaml" + // ValuesfileName is the default values file name. + ValuesfileName = "values.yaml" + // SchemafileName is the default values schema file name. + SchemafileName = "values.schema.json" + // TemplatesDir is the relative directory name for templates. + TemplatesDir = "templates" + // ChartsDir is the relative directory name for charts dependencies. + ChartsDir = "charts" + // TemplatesTestsDir is the relative directory name for tests. + TemplatesTestsDir = TemplatesDir + sep + "tests" + // IgnorefileName is the name of the Helm ignore file. + IgnorefileName = ".helmignore" + // IngressFileName is the name of the example ingress file. 
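+	// (All of the file names below are chart-relative paths; with the default
+	// separator IngressFileName, for instance, is "templates/ingress.yaml".)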
+ IngressFileName = TemplatesDir + sep + "ingress.yaml" + // DeploymentName is the name of the example deployment file. + DeploymentName = TemplatesDir + sep + "deployment.yaml" + // ServiceName is the name of the example service file. + ServiceName = TemplatesDir + sep + "service.yaml" + // ServiceAccountName is the name of the example serviceaccount file. + ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml" + // HorizontalPodAutoscalerName is the name of the example hpa file. + HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml" + // NotesName is the name of the example NOTES.txt file. + NotesName = TemplatesDir + sep + "NOTES.txt" + // HelpersName is the name of the example helpers file. + HelpersName = TemplatesDir + sep + "_helpers.tpl" + // TestConnectionName is the name of the example test file. + TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml" +) + +// maxChartNameLength is lower than the limits we know of with certain file systems, +// and with certain Kubernetes fields. +const maxChartNameLength = 250 + +const sep = string(filepath.Separator) + +const defaultChartfile = `apiVersion: v2 +name: %s +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" +` + +const defaultValues = `# Default values for %s. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+  # fsGroup: 2000
+
+securityContext: {}
+  # capabilities:
+  #   drop:
+  #   - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+service:
+  type: ClusterIP
+  port: 80
+
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "<CHARTNAME>.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "<CHARTNAME>.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "<CHARTNAME>.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "<CHARTNAME>.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.port }}
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /
+              port: http
+          readinessProbe:
+            httpGet:
+              path: /
+              port: http
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "<CHARTNAME>.selectorLabels" . | nindent 4 }}
+`
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "<CHARTNAME>.serviceAccountName" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "<CHARTNAME>.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "<CHARTNAME>.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "<CHARTNAME>.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "<CHARTNAME>.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "<CHARTNAME>.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define "<CHARTNAME>.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "<CHARTNAME>.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "<CHARTNAME>.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "<CHARTNAME>.labels" -}}
+helm.sh/chart: {{ include "<CHARTNAME>.chart" . }}
+{{ include "<CHARTNAME>.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "<CHARTNAME>.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "<CHARTNAME>.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "<CHARTNAME>.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "<CHARTNAME>.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "<CHARTNAME>.fullname" . }}-test-connection"
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "<CHARTNAME>.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward
+// compatibility.
+var Stderr io.Writer = os.Stderr
+
+// CreateFrom creates a new chart, but scaffolds it from the src chart.
+func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
+	schart, err := loader.Load(src)
+	if err != nil {
+		return errors.Wrapf(err, "could not load %s", src)
+	}
+
+	schart.Metadata = chartfile
+
+	var updatedTemplates []*chart.File
+
+	for _, template := range schart.Templates {
+		newData := transform(string(template.Data), schart.Name())
+		updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData})
+	}
+
+	schart.Templates = updatedTemplates
+	b, err := yaml.Marshal(schart.Values)
+	if err != nil {
+		return errors.Wrap(err, "reading values file")
+	}
+
+	var m map[string]interface{}
+	if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
+		return errors.Wrap(err, "transforming values file")
+	}
+	schart.Values = m
+
+	// SaveDir looks for the file values.yaml when saving rather than the values
+	// key in order to preserve the comments in the YAML. The name placeholder
+	// needs to be replaced on that file.
+	for _, f := range schart.Raw {
+		if f.Name == ValuesfileName {
+			f.Data = transform(string(f.Data), schart.Name())
+		}
+	}
+
+	return SaveDir(schart, dest)
+}
+
+// Create creates a new chart in a directory.
+//
+// Inside of dir, this will create a directory based on the name of
+// chartfile.Name. It will then write the Chart.yaml into this directory and
+// create the (empty) appropriate directories.
+//
+// The returned string will point to the newly created directory. It will be
+// an absolute path, even if the provided base directory was relative.
+//
+// If dir does not exist, this will return an error.
+// If Chart.yaml or any directories cannot be created, this will return an
+// error. In such a case, this will attempt to clean up by removing the
+// new chart directory.
+func Create(name, dir string) (string, error) {
+
+	// Sanity-check the name of a chart so user doesn't create one that causes problems.
+	if err := validateChartName(name); err != nil {
+		return "", err
+	}
+
+	path, err := filepath.Abs(dir)
+	if err != nil {
+		return path, err
+	}
+
+	if fi, err := os.Stat(path); err != nil {
+		return path, err
+	} else if !fi.IsDir() {
+		return path, errors.Errorf("no such directory %s", path)
+	}
+
+	cdir := filepath.Join(path, name)
+	if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+		return cdir, errors.Errorf("file %s already exists and is not a directory", cdir)
+	}
+
+	files := []struct {
+		path    string
+		content []byte
+	}{
+		{
+			// Chart.yaml
+			path:    filepath.Join(cdir, ChartfileName),
+			content: []byte(fmt.Sprintf(defaultChartfile, name)),
+		},
+		{
+			// values.yaml
+			path:    filepath.Join(cdir, ValuesfileName),
+			content: []byte(fmt.Sprintf(defaultValues, name)),
+		},
+		{
+			// .helmignore
+			path:    filepath.Join(cdir, IgnorefileName),
+			content: []byte(defaultIgnore),
+		},
+		{
+			// ingress.yaml
+			path:    filepath.Join(cdir, IngressFileName),
+			content: transform(defaultIngress, name),
+		},
+		{
+			// deployment.yaml
+			path:    filepath.Join(cdir, DeploymentName),
+			content: transform(defaultDeployment, name),
+		},
+		{
+			// service.yaml
+			path:    filepath.Join(cdir, ServiceName),
+			content: transform(defaultService, name),
+		},
+		{
+			// serviceaccount.yaml
+			path:    filepath.Join(cdir, ServiceAccountName),
+			content: transform(defaultServiceAccount, name),
+		},
+		{
+			// hpa.yaml
+			path:    filepath.Join(cdir, HorizontalPodAutoscalerName),
+			content: transform(defaultHorizontalPodAutoscaler, name),
+		},
+		{
+			// NOTES.txt
+			path:    filepath.Join(cdir, NotesName),
+			content: transform(defaultNotes, name),
+		},
+		{
+			// _helpers.tpl
+			path:    filepath.Join(cdir, HelpersName),
+			content: transform(defaultHelpers, name),
+		},
+		{
+			// test-connection.yaml
+			path:    filepath.Join(cdir, TestConnectionName),
+			content: transform(defaultTestConnection, name),
+		},
+	}
+
+	for _, file := range files {
+		if _, err := os.Stat(file.path); err == nil {
+			// There is no handle to a preferred output stream here.
+			fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+		}
+		if err := writeFile(file.path, file.content); err != nil {
+			return cdir, err
+		}
+	}
+	// Need to add the ChartsDir explicitly as it does not contain any file OOTB
+	if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+		return cdir, err
+	}
+	return cdir, nil
+}
+
+// transform performs a string replacement of the specified source for
+// a given key with the replacement string
+func transform(src, replacement string) []byte {
+	return []byte(strings.ReplaceAll(src, "<CHARTNAME>", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+	if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+	if name == "" || len(name) > maxChartNameLength {
+		return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+	}
+	if !chartName.MatchString(name) {
+		return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+	}
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
new file mode 100644
index 000000000..e01b95bf7
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
@@ -0,0 +1,285 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "log" + "strings" + + "helm.sh/helm/v3/pkg/chart" +) + +// ProcessDependencies checks through this chart's dependencies, processing accordingly. +func ProcessDependencies(c *chart.Chart, v Values) error { + if err := processDependencyEnabled(c, v, ""); err != nil { + return err + } + return processDependencyImportValues(c) +} + +// processDependencyConditions disables charts based on condition path value in values +func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) { + if reqs == nil { + return + } + for _, r := range reqs { + for _, c := range strings.Split(strings.TrimSpace(r.Condition), ",") { + if len(c) > 0 { + // retrieve value + vv, err := cvals.PathValue(cpath + c) + if err == nil { + // if not bool, warn + if bv, ok := vv.(bool); ok { + r.Enabled = bv + break + } else { + log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) + } + } else if _, ok := err.(ErrNoValue); !ok { + // this is a real error + log.Printf("Warning: PathValue returned error %v", err) + } + } + } + } +} + +// processDependencyTags disables charts based on tags in values +func processDependencyTags(reqs []*chart.Dependency, cvals Values) { + if reqs == nil { + return + } + vt, err := cvals.Table("tags") + if err != nil { + return + } + for _, r := range reqs { + var hasTrue, hasFalse bool + for _, k := range r.Tags { + if b, ok := vt[k]; ok { + // if not bool, warn + if bv, ok := b.(bool); ok { + if bv { + hasTrue = true + } else { + hasFalse = true + } + } else { + log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) + } + } + } + if !hasTrue && hasFalse { + r.Enabled = false + } else if hasTrue || !hasTrue && !hasFalse { + r.Enabled = true + } + } +} + +func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart { + for _, c := range charts { + if c == nil { + continue + } + if c.Name() != dep.Name { + continue + } + if !IsCompatibleRange(dep.Version, c.Metadata.Version) { + continue + } + + out := *c + md := *c.Metadata + out.Metadata = &md + + if dep.Alias != "" { + md.Name = dep.Alias + } + return &out + } + return nil +} + +// processDependencyEnabled removes disabled charts from dependencies +func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error { + if c.Metadata.Dependencies == nil { + return nil + } + + var chartDependencies []*chart.Chart + // If any dependency is not a part of Chart.yaml + // then this should be added to chartDependencies. 
+ // However, if the dependency is already specified in Chart.yaml + // we should not add it, as it would be anyways processed from Chart.yaml + +Loop: + for _, existing := range c.Dependencies() { + for _, req := range c.Metadata.Dependencies { + if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) { + continue Loop + } + } + chartDependencies = append(chartDependencies, existing) + } + + for _, req := range c.Metadata.Dependencies { + if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil { + chartDependencies = append(chartDependencies, chartDependency) + } + if req.Alias != "" { + req.Name = req.Alias + } + } + c.SetDependencies(chartDependencies...) + + // set all to true + for _, lr := range c.Metadata.Dependencies { + lr.Enabled = true + } + cvals, err := CoalesceValues(c, v) + if err != nil { + return err + } + // flag dependencies as enabled/disabled + processDependencyTags(c.Metadata.Dependencies, cvals) + processDependencyConditions(c.Metadata.Dependencies, cvals, path) + // make a map of charts to remove + rm := map[string]struct{}{} + for _, r := range c.Metadata.Dependencies { + if !r.Enabled { + // remove disabled chart + rm[r.Name] = struct{}{} + } + } + // don't keep disabled charts in new slice + cd := []*chart.Chart{} + copy(cd, c.Dependencies()[:0]) + for _, n := range c.Dependencies() { + if _, ok := rm[n.Metadata.Name]; !ok { + cd = append(cd, n) + } + } + // don't keep disabled charts in metadata + cdMetadata := []*chart.Dependency{} + copy(cdMetadata, c.Metadata.Dependencies[:0]) + for _, n := range c.Metadata.Dependencies { + if _, ok := rm[n.Name]; !ok { + cdMetadata = append(cdMetadata, n) + } + } + + // recursively call self to process sub dependencies + for _, t := range cd { + subpath := path + t.Metadata.Name + "." + if err := processDependencyEnabled(t, cvals, subpath); err != nil { + return err + } + } + // set the correct dependencies in metadata + c.Metadata.Dependencies = nil + c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...) + c.SetDependencies(cd...) + + return nil +} + +// pathToMap creates a nested map given a YAML path in dot notation. +func pathToMap(path string, data map[string]interface{}) map[string]interface{} { + if path == "." { + return data + } + return set(parsePath(path), data) +} + +func set(path []string, data map[string]interface{}) map[string]interface{} { + if len(path) == 0 { + return nil + } + cur := data + for i := len(path) - 1; i >= 0; i-- { + cur = map[string]interface{}{path[i]: cur} + } + return cur +} + +// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field. +func processImportValues(c *chart.Chart) error { + if c.Metadata.Dependencies == nil { + return nil + } + // combine chart values and empty config to get Values + cvals, err := CoalesceValues(c, nil) + if err != nil { + return err + } + b := make(map[string]interface{}) + // import values from each dependency if specified in import-values + for _, r := range c.Metadata.Dependencies { + var outiv []interface{} + for _, riv := range r.ImportValues { + switch iv := riv.(type) { + case map[string]interface{}: + child := iv["child"].(string) + parent := iv["parent"].(string) + + outiv = append(outiv, map[string]string{ + "child": child, + "parent": parent, + }) + + // get child table + vv, err := cvals.Table(r.Name + "." 
+ child) + if err != nil { + log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err) + continue + } + // create value map from child to be merged into parent + b = CoalesceTables(cvals, pathToMap(parent, vv.AsMap())) + case string: + child := "exports." + iv + outiv = append(outiv, map[string]string{ + "child": child, + "parent": ".", + }) + vm, err := cvals.Table(r.Name + "." + child) + if err != nil { + log.Printf("Warning: ImportValues missing table: %v", err) + continue + } + b = CoalesceTables(b, vm.AsMap()) + } + } + // set our formatted import values + r.ImportValues = outiv + } + + // set the new values + c.Values = CoalesceTables(cvals, b) + + return nil +} + +// processDependencyImportValues imports specified chart values from child to parent. +func processDependencyImportValues(c *chart.Chart) error { + for _, d := range c.Dependencies() { + // recurse + if err := processDependencyImportValues(d); err != nil { + return err + } + } + return processImportValues(c) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go new file mode 100644 index 000000000..8f06bcc9a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go @@ -0,0 +1,44 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package chartutil contains tools for working with charts. + +Charts are described in the chart package (pkg/chart). +This package provides utilities for serializing and deserializing charts. + +A chart can be represented on the file system in one of two ways: + + - As a directory that contains a Chart.yaml file and other chart things. + - As a tarred gzipped file containing a directory that then contains a + Chart.yaml file. + +This package provides utilities for working with those file formats. + +The preferred way of loading a chart is using 'loader.Load`: + + chart, err := loader.Load(filename) + +This will attempt to discover whether the file at 'filename' is a directory or +a chart archive. It will then load accordingly. + +For accepting raw compressed tar file data from an io.Reader, the +'loader.LoadArchive()' will read in the data, uncompress it, and unpack it +into a Chart. + +When creating charts in memory, use the 'helm.sh/helm/pkg/chart' +package directly. +*/ +package chartutil // import "helm.sh/helm/v3/pkg/chartutil" diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go new file mode 100644 index 000000000..fcdcc27ea --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go @@ -0,0 +1,35 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" +) + +// ErrNoTable indicates that a chart does not have a matching table. +type ErrNoTable struct { + Key string +} + +func (e ErrNoTable) Error() string { return fmt.Sprintf("%q is not a table", e.Key) } + +// ErrNoValue indicates that Values does not contain a key with a value +type ErrNoValue struct { + Key string +} + +func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go new file mode 100644 index 000000000..6ad09e417 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go @@ -0,0 +1,91 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// Expand uncompresses and extracts a chart into the specified directory. +func Expand(dir string, r io.Reader) error { + files, err := loader.LoadArchiveFiles(r) + if err != nil { + return err + } + + // Get the name of the chart + var chartName string + for _, file := range files { + if file.Name == "Chart.yaml" { + ch := &chart.Metadata{} + if err := yaml.Unmarshal(file.Data, ch); err != nil { + return errors.Wrap(err, "cannot load Chart.yaml") + } + chartName = ch.Name + } + } + if chartName == "" { + return errors.New("chart name not specified") + } + + // Find the base directory + chartdir, err := securejoin.SecureJoin(dir, chartName) + if err != nil { + return err + } + + // Copy all files verbatim. We don't parse these files because parsing can remove + // comments. + for _, file := range files { + outpath, err := securejoin.SecureJoin(chartdir, file.Name) + if err != nil { + return err + } + + // Make sure the necessary subdirs get created. + basedir := filepath.Dir(outpath) + if err := os.MkdirAll(basedir, 0755); err != nil { + return err + } + + if err := ioutil.WriteFile(outpath, file.Data, 0644); err != nil { + return err + } + } + + return nil +} + +// ExpandFile expands the src file into the dest directory. 
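+//
+// A minimal usage sketch (the archive path here is hypothetical):
+//
+//	if err := ExpandFile("./charts", "mychart-0.1.0.tgz"); err != nil {
+//		// handle error
+//	}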
+func ExpandFile(dest, src string) error { + h, err := os.Open(src) + if err != nil { + return err + } + defer h.Close() + return Expand(dest, h) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go new file mode 100644 index 000000000..7b9768fd3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// ValidateAgainstSchema checks that values does not violate the structure laid out in schema +func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { + var sb strings.Builder + if chrt.Schema != nil { + err := ValidateAgainstSingleSchema(values, chrt.Schema) + if err != nil { + sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) + sb.WriteString(err.Error()) + } + } + + // For each dependency, recursively call this function with the coalesced values + for _, subchart := range chrt.Dependencies() { + subchartValues := values[subchart.Name()].(map[string]interface{}) + if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { + sb.WriteString(err.Error()) + } + } + + if sb.Len() > 0 { + return errors.New(sb.String()) + } + + return nil +} + +// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema +func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) { + defer func() { + if r := recover(); r != nil { + reterr = fmt.Errorf("unable to validate schema: %s", r) + } + }() + + valuesData, err := yaml.Marshal(values) + if err != nil { + return err + } + valuesJSON, err := yaml.YAMLToJSON(valuesData) + if err != nil { + return err + } + if bytes.Equal(valuesJSON, []byte("null")) { + valuesJSON = []byte("{}") + } + schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) + valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) + + result, err := gojsonschema.Validate(schemaLoader, valuesLoader) + if err != nil { + return err + } + + if !result.Valid() { + var sb strings.Builder + for _, desc := range result.Errors() { + sb.WriteString(fmt.Sprintf("- %s\n", desc)) + } + return errors.New(sb.String()) + } + + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/save.go b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go new file mode 100644 index 000000000..2ce4eddaf --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go @@ -0,0 +1,244 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") + +// SaveDir saves a chart as files in a directory. +// +// This takes the chart name, and creates a new subdirectory inside of the given dest +// directory, writing the chart's contents to that subdirectory. +func SaveDir(c *chart.Chart, dest string) error { + // Create the chart directory + outdir := filepath.Join(dest, c.Name()) + if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() { + return errors.Errorf("file %s already exists and is not a directory", outdir) + } + if err := os.MkdirAll(outdir, 0755); err != nil { + return err + } + + // Save the chart file. + if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil { + return err + } + + // Save values.yaml + for _, f := range c.Raw { + if f.Name == ValuesfileName { + vf := filepath.Join(outdir, ValuesfileName) + if err := writeFile(vf, f.Data); err != nil { + return err + } + } + } + + // Save values.schema.json if it exists + if c.Schema != nil { + filename := filepath.Join(outdir, SchemafileName) + if err := writeFile(filename, c.Schema); err != nil { + return err + } + } + + // Save templates and files + for _, o := range [][]*chart.File{c.Templates, c.Files} { + for _, f := range o { + n := filepath.Join(outdir, f.Name) + if err := writeFile(n, f.Data); err != nil { + return err + } + } + } + + // Save dependencies + base := filepath.Join(outdir, ChartsDir) + for _, dep := range c.Dependencies() { + // Here, we write each dependency as a tar file. + if _, err := Save(dep, base); err != nil { + return errors.Wrapf(err, "saving %s", dep.ChartFullPath()) + } + } + return nil +} + +// Save creates an archived chart to the given directory. +// +// This takes an existing chart and a destination directory. +// +// If the directory is /foo, and the chart is named bar, with version 1.0.0, this +// will generate /foo/bar-1.0.0.tgz. +// +// This returns the absolute path to the chart archive file. 
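+//
+// A short usage sketch, assuming a loaded chart named "bar" at version 1.0.0:
+//
+//	path, err := Save(ch, "/foo")
+//	// on success, path is "/foo/bar-1.0.0.tgz"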
+func Save(c *chart.Chart, outDir string) (string, error) { + if err := c.Validate(); err != nil { + return "", errors.Wrap(err, "chart validation") + } + + filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version) + filename = filepath.Join(outDir, filename) + dir := filepath.Dir(filename) + if stat, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err2 := os.MkdirAll(dir, 0755); err2 != nil { + return "", err2 + } + } else { + return "", errors.Wrapf(err, "stat %s", dir) + } + } else if !stat.IsDir() { + return "", errors.Errorf("is not a directory: %s", dir) + } + + f, err := os.Create(filename) + if err != nil { + return "", err + } + + // Wrap in gzip writer + zipper := gzip.NewWriter(f) + zipper.Header.Extra = headerBytes + zipper.Header.Comment = "Helm" + + // Wrap in tar writer + twriter := tar.NewWriter(zipper) + rollback := false + defer func() { + twriter.Close() + zipper.Close() + f.Close() + if rollback { + os.Remove(filename) + } + }() + + if err := writeTarContents(twriter, c, ""); err != nil { + rollback = true + return filename, err + } + return filename, nil +} + +func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error { + base := filepath.Join(prefix, c.Name()) + + // Pull out the dependencies of a v1 Chart, since there's no way + // to tell the serializer to skip a field for just this use case + savedDependencies := c.Metadata.Dependencies + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Metadata.Dependencies = nil + } + // Save Chart.yaml + cdata, err := yaml.Marshal(c.Metadata) + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Metadata.Dependencies = savedDependencies + } + if err != nil { + return err + } + if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata); err != nil { + return err + } + + // Save Chart.lock + // TODO: remove the APIVersion check when APIVersionV1 is not used anymore + if c.Metadata.APIVersion == chart.APIVersionV2 { + if c.Lock != nil { + ldata, err := yaml.Marshal(c.Lock) + if err != nil { + return err + } + if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata); err != nil { + return err + } + } + } + + // Save values.yaml + for _, f := range c.Raw { + if f.Name == ValuesfileName { + if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data); err != nil { + return err + } + } + } + + // Save values.schema.json if it exists + if c.Schema != nil { + if !json.Valid(c.Schema) { + return errors.New("Invalid JSON in " + SchemafileName) + } + if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil { + return err + } + } + + // Save templates + for _, f := range c.Templates { + n := filepath.Join(base, f.Name) + if err := writeToTar(out, n, f.Data); err != nil { + return err + } + } + + // Save files + for _, f := range c.Files { + n := filepath.Join(base, f.Name) + if err := writeToTar(out, n, f.Data); err != nil { + return err + } + } + + // Save dependencies + for _, dep := range c.Dependencies() { + if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil { + return err + } + } + return nil +} + +// writeToTar writes a single file to a tar archive. +func writeToTar(out *tar.Writer, name string, body []byte) error { + // TODO: Do we need to create dummy parent directory names if none exist? 
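+	// Each entry is written with a fixed 0644 mode and a slash-separated
+	// name so that the archive unpacks the same way on every platform.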
+ h := &tar.Header{ + Name: filepath.ToSlash(name), + Mode: 0644, + Size: int64(len(body)), + ModTime: time.Now(), + } + if err := out.WriteHeader(h); err != nil { + return err + } + _, err := out.Write(body) + return err +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go new file mode 100644 index 000000000..05c090cb6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go @@ -0,0 +1,112 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "regexp" + + "github.com/pkg/errors" +) + +// validName is a regular expression for resource names. +// +// According to the Kubernetes help text, the regular expression it uses is: +// +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// +// This follows the above regular expression (but requires a full string match, not partial). +// +// The Kubernetes documentation is here, though it is not entirely correct: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`) + +var ( + // errMissingName indicates that a release (name) was not provided. + errMissingName = errors.New("no name provided") + + // errInvalidName indicates that an invalid release name was provided + errInvalidName = fmt.Errorf( + "invalid release name, must match regex %s and the length must not be longer than 53", + validName.String()) + + // errInvalidKubernetesName indicates that the name does not meet the Kubernetes + // restrictions on metadata names. + errInvalidKubernetesName = fmt.Errorf( + "invalid metadata name, must match regex %s and the length must not be longer than 253", + validName.String()) +) + +const ( + // According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names) + // some resource names have a max length of 63 characters while others have a max + // length of 253 characters. As we cannot be sure the resources used in a chart, we + // therefore need to limit it to 63 chars and reserve 10 chars for additional part to name + // of the resource. The reason is that chart maintainers can use release name as part of + // the resource name (and some additional chars). + maxReleaseNameLen = 53 + // maxMetadataNameLen is the maximum length Kubernetes allows for any name. + maxMetadataNameLen = 253 +) + +// ValidateReleaseName performs checks for an entry for a Helm release name +// +// For Helm to allow a name, it must be below a certain character count (53) and also match +// a regular expression. +// +// According to the Kubernetes help text, the regular expression it uses is: +// +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// +// This follows the above regular expression (but requires a full string match, not partial). 
+// +// The Kubernetes documentation is here, though it is not entirely correct: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +func ValidateReleaseName(name string) error { + // This case is preserved for backwards compatibility + if name == "" { + return errMissingName + + } + if len(name) > maxReleaseNameLen || !validName.MatchString(name) { + return errInvalidName + } + return nil +} + +// ValidateMetadataName validates the name field of a Kubernetes metadata object. +// +// Empty strings, strings longer than 253 chars, or strings that don't match the regexp +// will fail. +// +// According to the Kubernetes help text, the regular expression it uses is: +// +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// +// This follows the above regular expression (but requires a full string match, not partial). +// +// The Kubernetes documentation is here, though it is not entirely correct: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +// +// Deprecated: remove in Helm 4. Name validation now uses rules defined in +// pkg/lint/rules.validateMetadataNameFunc() +func ValidateMetadataName(name string) error { + if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) { + return errInvalidKubernetesName + } + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/values.go b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go new file mode 100644 index 000000000..97bf44217 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go @@ -0,0 +1,212 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// GlobalKey is the name of the Values key that is used for storing global vars. +const GlobalKey = "global" + +// Values represents a collection of chart values. +type Values map[string]interface{} + +// YAML encodes the Values into a YAML string. +func (v Values) YAML() (string, error) { + b, err := yaml.Marshal(v) + return string(b), err +} + +// Table gets a table (YAML subsection) from a Values object. +// +// The table is returned as a Values. +// +// Compound table names may be specified with dots: +// +// foo.bar +// +// The above will be evaluated as "The table bar inside the table +// foo". +// +// An ErrNoTable is returned if the table does not exist. +func (v Values) Table(name string) (Values, error) { + table := v + var err error + + for _, n := range parsePath(name) { + if table, err = tableLookup(table, n); err != nil { + break + } + } + return table, err +} + +// AsMap is a utility function for converting Values to a map[string]interface{}. +// +// It protects against nil map panics. +func (v Values) AsMap() map[string]interface{} { + if len(v) == 0 { + return map[string]interface{}{} + } + return v +} + +// Encode writes serialized Values information to the given io.Writer. 
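+//
+// For example, to capture the YAML form in memory (illustrative only):
+//
+//	var buf bytes.Buffer
+//	err := vals.Encode(&buf) // buf now holds the YAML document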
+func (v Values) Encode(w io.Writer) error { + out, err := yaml.Marshal(v) + if err != nil { + return err + } + _, err = w.Write(out) + return err +} + +func tableLookup(v Values, simple string) (Values, error) { + v2, ok := v[simple] + if !ok { + return v, ErrNoTable{simple} + } + if vv, ok := v2.(map[string]interface{}); ok { + return vv, nil + } + + // This catches a case where a value is of type Values, but doesn't (for some + // reason) match the map[string]interface{}. This has been observed in the + // wild, and might be a result of a nil map of type Values. + if vv, ok := v2.(Values); ok { + return vv, nil + } + + return Values{}, ErrNoTable{simple} +} + +// ReadValues will parse YAML byte data into a Values. +func ReadValues(data []byte) (vals Values, err error) { + err = yaml.Unmarshal(data, &vals) + if len(vals) == 0 { + vals = Values{} + } + return vals, err +} + +// ReadValuesFile will parse a YAML file into a map of values. +func ReadValuesFile(filename string) (Values, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return map[string]interface{}{}, err + } + return ReadValues(data) +} + +// ReleaseOptions represents the additional release options needed +// for the composition of the final values struct +type ReleaseOptions struct { + Name string + Namespace string + Revision int + IsUpgrade bool + IsInstall bool +} + +// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files +// +// This takes both ReleaseOptions and Capabilities to merge into the render values. +func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) { + if caps == nil { + caps = DefaultCapabilities + } + top := map[string]interface{}{ + "Chart": chrt.Metadata, + "Capabilities": caps, + "Release": map[string]interface{}{ + "Name": options.Name, + "Namespace": options.Namespace, + "IsUpgrade": options.IsUpgrade, + "IsInstall": options.IsInstall, + "Revision": options.Revision, + "Service": "Helm", + }, + } + + vals, err := CoalesceValues(chrt, chrtVals) + if err != nil { + return top, err + } + + if err := ValidateAgainstSchema(chrt, vals); err != nil { + errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s" + return top, fmt.Errorf(errFmt, err.Error()) + } + + top["Values"] = vals + return top, nil +} + +// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. +func istable(v interface{}) bool { + _, ok := v.(map[string]interface{}) + return ok +} + +// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path. +// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods. +// Given the following YAML data the value at path "chapter.one.title" is "Loomings". 
+// +// chapter: +// one: +// title: "Loomings" +func (v Values) PathValue(path string) (interface{}, error) { + if path == "" { + return nil, errors.New("YAML path cannot be empty") + } + return v.pathValue(parsePath(path)) +} + +func (v Values) pathValue(path []string) (interface{}, error) { + if len(path) == 1 { + // if exists must be root key not table + if _, ok := v[path[0]]; ok && !istable(v[path[0]]) { + return v[path[0]], nil + } + return nil, ErrNoValue{path[0]} + } + + key, path := path[len(path)-1], path[:len(path)-1] + // get our table for table path + t, err := v.Table(joinPath(path...)) + if err != nil { + return nil, ErrNoValue{key} + } + // check table for key and ensure value is not a table + if k, ok := t[key]; ok && !istable(k) { + return k, nil + } + return nil, ErrNoValue{key} +} + +func parsePath(key string) []string { return strings.Split(key, ".") } + +func joinPath(path ...string) string { return strings.Join(path, ".") } diff --git a/vendor/helm.sh/helm/v3/pkg/engine/doc.go b/vendor/helm.sh/helm/v3/pkg/engine/doc.go new file mode 100644 index 000000000..6ff875c46 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/doc.go @@ -0,0 +1,23 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package engine implements the Go text template engine as needed for Helm. + +When Helm renders templates it does so with additional functions and different +modes (e.g., strict, lint mode). This package handles the helm specific +implementation. +*/ +package engine // import "helm.sh/helm/v3/pkg/engine" diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go new file mode 100644 index 000000000..657d5767b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go @@ -0,0 +1,418 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "fmt" + "log" + "path" + "path/filepath" + "regexp" + "sort" + "strings" + "text/template" + + "github.com/pkg/errors" + "k8s.io/client-go/rest" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" +) + +// Engine is an implementation of the Helm rendering implementation for templates. +type Engine struct { + // If strict is enabled, template rendering will fail if a template references + // a value that was not passed in. 
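+	// (Internally this sets text/template's "missingkey=error" option; see
+	// renderWithReferences below.)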
+ Strict bool + // In LintMode, some 'required' template values may be missing, so don't fail + LintMode bool + // the rest config to connect to the kubernetes api + config *rest.Config + // EnableDNS tells the engine to allow DNS lookups when rendering templates + EnableDNS bool +} + +// New creates a new instance of Engine using the passed in rest config. +func New(config *rest.Config) Engine { + return Engine{ + config: config, + } +} + +// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates. +// +// Render can be called repeatedly on the same engine. +// +// This will look in the chart's 'templates' data (e.g. the 'templates/' directory) +// and attempt to render the templates there using the values passed in. +// +// Values are scoped to their templates. A dependency template will not have +// access to the values set for its parent. If chart "foo" includes chart "bar", +// "bar" will not have access to the values for "foo". +// +// Values should be prepared with something like `chartutils.ReadValues`. +// +// Values are passed through the templates according to scope. If the top layer +// chart includes the chart foo, which includes the chart bar, the values map +// will be examined for a table called "foo". If "foo" is found in vals, +// that section of the values will be passed into the "foo" chart. And if that +// section contains a value named "bar", that value will be passed on to the +// bar chart during render time. +func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + tmap := allTemplates(chrt, values) + return e.render(tmap) +} + +// Render takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. +func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + return new(Engine).Render(chrt, values) +} + +// RenderWithClient takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client +func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) { + return Engine{ + config: config, + }.Render(chrt, values) +} + +// renderable is an object that can be rendered. +type renderable struct { + // tpl is the current template. + tpl string + // vals are the values to be supplied to the template. + vals chartutil.Values + // namespace prefix to the templates of the current chart + basePath string +} + +const warnStartDelim = "HELM_ERR_START" +const warnEndDelim = "HELM_ERR_END" +const recursionMaxNums = 1000 + +var warnRegex = regexp.MustCompile(warnStartDelim + `((?s).*)` + warnEndDelim) + +func warnWrap(warn string) string { + return warnStartDelim + warn + warnEndDelim +} + +// initFunMap creates the Engine's FuncMap and adds context-specific functions. +func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]renderable) { + funcMap := funcMap() + includedNames := make(map[string]int) + + // Add the 'include' function here so we can close over t. 
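+	// includedNames tracks how many times each named template is currently
+	// being included, so runaway recursion fails once recursionMaxNums is
+	// exceeded instead of overflowing the stack.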
+ funcMap["include"] = func(name string, data interface{}) (string, error) { + var buf strings.Builder + if v, ok := includedNames[name]; ok { + if v > recursionMaxNums { + return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name) + } + includedNames[name]++ + } else { + includedNames[name] = 1 + } + err := t.ExecuteTemplate(&buf, name, data) + includedNames[name]-- + return buf.String(), err + } + + // Add the 'tpl' function here + funcMap["tpl"] = func(tpl string, vals chartutil.Values) (string, error) { + basePath, err := vals.PathValue("Template.BasePath") + if err != nil { + return "", errors.Wrapf(err, "cannot retrieve Template.Basepath from values inside tpl function: %s", tpl) + } + + templateName, err := vals.PathValue("Template.Name") + if err != nil { + return "", errors.Wrapf(err, "cannot retrieve Template.Name from values inside tpl function: %s", tpl) + } + + templates := map[string]renderable{ + templateName.(string): { + tpl: tpl, + vals: vals, + basePath: basePath.(string), + }, + } + + result, err := e.renderWithReferences(templates, referenceTpls) + if err != nil { + return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl) + } + return result[templateName.(string)], nil + } + + // Add the `required` function here so we can use lintMode + funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { + if val == nil { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.Errorf(warnWrap(warn)) + } else if _, ok := val.(string); ok { + if val == "" { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.Errorf(warnWrap(warn)) + } + } + return val, nil + } + + // Override sprig fail function for linting and wrapping message + funcMap["fail"] = func(msg string) (string, error) { + if e.LintMode { + // Don't fail when linting + log.Printf("[INFO] Fail: %s", msg) + return "", nil + } + return "", errors.New(warnWrap(msg)) + } + + // If we are not linting and have a cluster connection, provide a Kubernetes-backed + // implementation. + if !e.LintMode && e.config != nil { + funcMap["lookup"] = NewLookupFunction(e.config) + } + + // When DNS lookups are not enabled override the sprig function and return + // an empty string. + if !e.EnableDNS { + funcMap["getHostByName"] = func(name string) string { + return "" + } + } + + t.Funcs(funcMap) +} + +// render takes a map of templates/values and renders them. +func (e Engine) render(tpls map[string]renderable) (map[string]string, error) { + return e.renderWithReferences(tpls, tpls) +} + +// renderWithReferences takes a map of templates/values to render, and a map of +// templates which can be referenced within them. +func (e Engine) renderWithReferences(tpls, referenceTpls map[string]renderable) (rendered map[string]string, err error) { + // Basically, what we do here is start with an empty parent template and then + // build up a list of templates -- one for each file. Once all of the templates + // have been parsed, we loop through again and execute every template. + // + // The idea with this process is to make it possible for more complex templates + // to share common blocks, but to make the entire thing feel like a file-based + // template engine. 
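+	// text/template can panic on malformed input, so recover and hand the
+	// failure back to the caller as an ordinary error.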
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.Errorf("rendering template failed: %v", r)
+		}
+	}()
+	t := template.New("gotpl")
+	if e.Strict {
+		t.Option("missingkey=error")
+	} else {
+		// Note that zero will attempt to add default values for types it knows,
+		// but will still emit "<no value>" for others. We mitigate that later.
+		t.Option("missingkey=zero")
+	}
+
+	e.initFunMap(t, referenceTpls)
+
+	// We want to parse the templates in a predictable order. The order favors
+	// higher-level (in file system) templates over deeply nested templates.
+	keys := sortTemplates(tpls)
+	referenceKeys := sortTemplates(referenceTpls)
+
+	for _, filename := range keys {
+		r := tpls[filename]
+		if _, err := t.New(filename).Parse(r.tpl); err != nil {
+			return map[string]string{}, cleanupParseError(filename, err)
+		}
+	}
+
+	// Adding the reference templates to the template context
+	// so they can be referenced in the tpl function
+	for _, filename := range referenceKeys {
+		if t.Lookup(filename) == nil {
+			r := referenceTpls[filename]
+			if _, err := t.New(filename).Parse(r.tpl); err != nil {
+				return map[string]string{}, cleanupParseError(filename, err)
+			}
+		}
+	}
+
+	rendered = make(map[string]string, len(keys))
+	for _, filename := range keys {
+		// Don't render partials. We don't care about the direct output of partials.
+		// They are only included from other templates.
+		if strings.HasPrefix(path.Base(filename), "_") {
+			continue
+		}
+		// At render time, add information about the template that is being rendered.
+		vals := tpls[filename].vals
+		vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath}
+		var buf strings.Builder
+		if err := t.ExecuteTemplate(&buf, filename, vals); err != nil {
+			return map[string]string{}, cleanupExecError(filename, err)
+		}
+
+		// Work around the issue where Go will emit "<no value>" even if Options(missing=zero)
+		// is set. Since missing=error will never get here, we do not need to handle
+		// the Strict case.
+		rendered[filename] = strings.ReplaceAll(buf.String(), "<no value>", "")
+	}
+
+	return rendered, nil
+}
+
+func cleanupParseError(filename string, err error) error {
+	tokens := strings.Split(err.Error(), ": ")
+	if len(tokens) == 1 {
+		// This might happen if a non-templating error occurs
+		return fmt.Errorf("parse error in (%s): %s", filename, err)
+	}
+	// The first token is "template"
+	// The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+	location := tokens[1]
+	// The remaining tokens make up a stacktrace-like chain, ending with the relevant error
+	errMsg := tokens[len(tokens)-1]
+	return fmt.Errorf("parse error at (%s): %s", string(location), errMsg)
+}
+
+func cleanupExecError(filename string, err error) error {
+	if _, isExecError := err.(template.ExecError); !isExecError {
+		return err
+	}
+
+	tokens := strings.SplitN(err.Error(), ": ", 3)
+	if len(tokens) != 3 {
+		// This might happen if a non-templating error occurs
+		return fmt.Errorf("execution error in (%s): %s", filename, err)
+	}
+
+	// The first token is "template"
+	// The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+	location := tokens[1]
+
+	parts := warnRegex.FindStringSubmatch(tokens[2])
+	if len(parts) >= 2 {
+		return fmt.Errorf("execution error at (%s): %s", string(location), parts[1])
+	}
+
+	return err
+}
+
+func sortTemplates(tpls map[string]renderable) []string {
+	keys := make([]string, len(tpls))
+	i := 0
+	for key := range tpls {
+		keys[i] = key
+		i++
+	}
+	sort.Sort(sort.Reverse(byPathLen(keys)))
+	return keys
+}
+
+type byPathLen []string
+
+func (p byPathLen) Len() int      { return len(p) }
+func (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] }
+func (p byPathLen) Less(i, j int) bool {
+	a, b := p[i], p[j]
+	ca, cb := strings.Count(a, "/"), strings.Count(b, "/")
+	if ca == cb {
+		return strings.Compare(a, b) == -1
+	}
+	return ca < cb
+}
+
+// allTemplates returns all templates for a chart and its dependencies.
+//
+// As it goes, it also prepares the values in a scope-sensitive manner.
+func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {
+	templates := make(map[string]renderable)
+	recAllTpls(c, templates, vals)
+	return templates
+}
+
+// recAllTpls recurses through the templates in a chart.
+//
+// As it recurses, it also sets the values to be appropriate for the template
+// scope.
+func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) map[string]interface{} {
+	subCharts := make(map[string]interface{})
+	chartMetaData := struct {
+		chart.Metadata
+		IsRoot bool
+	}{*c.Metadata, c.IsRoot()}
+
+	next := map[string]interface{}{
+		"Chart":        chartMetaData,
+		"Files":        newFiles(c.Files),
+		"Release":      vals["Release"],
+		"Capabilities": vals["Capabilities"],
+		"Values":       make(chartutil.Values),
+		"Subcharts":    subCharts,
+	}
+
+	// If there is a {{.Values.ThisChart}} in the parent metadata,
+	// copy that into the {{.Values}} for this template.
+	if c.IsRoot() {
+		next["Values"] = vals["Values"]
+	} else if vs, err := vals.Table("Values."
+ c.Name()); err == nil { + next["Values"] = vs + } + + for _, child := range c.Dependencies() { + subCharts[child.Name()] = recAllTpls(child, templates, next) + } + + newParentID := c.ChartFullPath() + for _, t := range c.Templates { + if !isTemplateValid(c, t.Name) { + continue + } + templates[path.Join(newParentID, t.Name)] = renderable{ + tpl: string(t.Data), + vals: next, + basePath: path.Join(newParentID, "templates"), + } + } + + return next +} + +// isTemplateValid returns true if the template is valid for the chart type +func isTemplateValid(ch *chart.Chart, templateName string) bool { + if isLibraryChart(ch) { + return strings.HasPrefix(filepath.Base(templateName), "_") + } + return true +} + +// isLibraryChart returns true if the chart is a library chart +func isLibraryChart(c *chart.Chart) bool { + return strings.EqualFold(c.Metadata.Type, "library") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/files.go b/vendor/helm.sh/helm/v3/pkg/engine/files.go new file mode 100644 index 000000000..d7e62da5a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/files.go @@ -0,0 +1,160 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "encoding/base64" + "path" + "strings" + + "github.com/gobwas/glob" + + "helm.sh/helm/v3/pkg/chart" +) + +// files is a map of files in a chart that can be accessed from a template. +type files map[string][]byte + +// NewFiles creates a new files from chart files. +// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files. +func newFiles(from []*chart.File) files { + files := make(map[string][]byte) + for _, f := range from { + files[f.Name] = f.Data + } + return files +} + +// GetBytes gets a file by path. +// +// The returned data is raw. In a template context, this is identical to calling +// {{index .Files $path}}. +// +// This is intended to be accessed from within a template, so a missed key returns +// an empty []byte. +func (f files) GetBytes(name string) []byte { + if v, ok := f[name]; ok { + return v + } + return []byte{} +} + +// Get returns a string representation of the given file. +// +// Fetch the contents of a file as a string. It is designed to be called in a +// template. +// +// {{.Files.Get "foo"}} +func (f files) Get(name string) string { + return string(f.GetBytes(name)) +} + +// Glob takes a glob pattern and returns another files object only containing +// matched files. +// +// This is designed to be called from a template. +// +// {{ range $name, $content := .Files.Glob("foo/**") }} +// {{ $name }}: | +// {{ .Files.Get($name) | indent 4 }}{{ end }} +func (f files) Glob(pattern string) files { + g, err := glob.Compile(pattern, '/') + if err != nil { + g, _ = glob.Compile("**") + } + + nf := newFiles(nil) + for name, contents := range f { + if g.Match(name) { + nf[name] = contents + } + } + + return nf +} + +// AsConfig turns a Files group and flattens it to a YAML map suitable for +// including in the 'data' section of a Kubernetes ConfigMap definition. 
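+// File names are reduced to their base names (path.Base), so entries from
+// different directories can collide.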
+// Duplicate keys will be overwritten, so be aware that your file names +// (regardless of path) should be unique. +// +// This is designed to be called from a template, and will return empty string +// (via toYAML function) if it cannot be serialized to YAML, or if the Files +// object is nil. +// +// The output will not be indented, so you will want to pipe this to the +// 'indent' template function. +// +// data: +// {{ .Files.Glob("config/**").AsConfig() | indent 4 }} +func (f files) AsConfig() string { + if f == nil { + return "" + } + + m := make(map[string]string) + + // Explicitly convert to strings, and file names + for k, v := range f { + m[path.Base(k)] = string(v) + } + + return toYAML(m) +} + +// AsSecrets returns the base64-encoded value of a Files object suitable for +// including in the 'data' section of a Kubernetes Secret definition. +// Duplicate keys will be overwritten, so be aware that your file names +// (regardless of path) should be unique. +// +// This is designed to be called from a template, and will return empty string +// (via toYAML function) if it cannot be serialized to YAML, or if the Files +// object is nil. +// +// The output will not be indented, so you will want to pipe this to the +// 'indent' template function. +// +// data: +// {{ .Files.Glob("secrets/*").AsSecrets() }} +func (f files) AsSecrets() string { + if f == nil { + return "" + } + + m := make(map[string]string) + + for k, v := range f { + m[path.Base(k)] = base64.StdEncoding.EncodeToString(v) + } + + return toYAML(m) +} + +// Lines returns each line of a named file (split by "\n") as a slice, so it can +// be ranged over in your templates. +// +// This is designed to be called from a template. +// +// {{ range .Files.Lines "foo/bar.html" }} +// {{ . }}{{ end }} +func (f files) Lines(path string) []string { + if f == nil || f[path] == nil { + return []string{} + } + + return strings.Split(string(f[path]), "\n") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/funcs.go b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go new file mode 100644 index 000000000..92b4c3383 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go @@ -0,0 +1,177 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" + + "github.com/BurntSushi/toml" + "github.com/Masterminds/sprig/v3" + "sigs.k8s.io/yaml" +) + +// funcMap returns a mapping of all of the functions that Engine has. +// +// Because some functions are late-bound (e.g. contain context-sensitive +// data), the functions may not all perform identically outside of an Engine +// as they will inside of an Engine. +// +// Known late-bound functions: +// +// - "include" +// - "tpl" +// +// These are late-bound in Engine.Render(). The +// version included in the FuncMap is a placeholder. 
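+// The "lookup" placeholder is likewise replaced with a cluster-backed
+// implementation when the Engine has a REST config and is not in lint mode.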
+// +func funcMap() template.FuncMap { + f := sprig.TxtFuncMap() + delete(f, "env") + delete(f, "expandenv") + + // Add some extra functionality + extra := template.FuncMap{ + "toToml": toTOML, + "toYaml": toYAML, + "fromYaml": fromYAML, + "fromYamlArray": fromYAMLArray, + "toJson": toJSON, + "fromJson": fromJSON, + "fromJsonArray": fromJSONArray, + + // This is a placeholder for the "include" function, which is + // late-bound to a template. By declaring it here, we preserve the + // integrity of the linter. + "include": func(string, interface{}) string { return "not implemented" }, + "tpl": func(string, interface{}) interface{} { return "not implemented" }, + "required": func(string, interface{}) (interface{}, error) { return "not implemented", nil }, + // Provide a placeholder for the "lookup" function, which requires a kubernetes + // connection. + "lookup": func(string, string, string, string) (map[string]interface{}, error) { + return map[string]interface{}{}, nil + }, + } + + for k, v := range extra { + f[k] = v + } + + return f +} + +// toYAML takes an interface, marshals it to yaml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toYAML(v interface{}) string { + data, err := yaml.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return strings.TrimSuffix(string(data), "\n") +} + +// fromYAML converts a YAML document into a map[string]interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromYAML(str string) map[string]interface{} { + m := map[string]interface{}{} + + if err := yaml.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromYAMLArray converts a YAML array into a []interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromYAMLArray(str string) []interface{} { + a := []interface{}{} + + if err := yaml.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} + +// toTOML takes an interface, marshals it to toml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toTOML(v interface{}) string { + b := bytes.NewBuffer(nil) + e := toml.NewEncoder(b) + err := e.Encode(v) + if err != nil { + return err.Error() + } + return b.String() +} + +// toJSON takes an interface, marshals it to json, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toJSON(v interface{}) string { + data, err := json.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return string(data) +} + +// fromJSON converts a JSON document into a map[string]interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. 
It will insert the returned error message string into +// m["Error"] in the returned map. +func fromJSON(str string) map[string]interface{} { + m := make(map[string]interface{}) + + if err := json.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromJSONArray converts a JSON array into a []interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromJSONArray(str string) []interface{} { + a := []interface{}{} + + if err := json.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go new file mode 100644 index 000000000..b378ca9d6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -0,0 +1,124 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "log" + "strings" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) + +// NewLookupFunction returns a function for looking up objects in the cluster. +// +// If the resource does not exist, no error is raised. +// +// This function is considered deprecated, and will be renamed in Helm 4. It will no +// longer be a public function. +func NewLookupFunction(config *rest.Config) lookupFunc { + return func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) { + var client dynamic.ResourceInterface + c, namespaced, err := getDynamicClientOnKind(apiversion, resource, config) + if err != nil { + return map[string]interface{}{}, err + } + if namespaced && namespace != "" { + client = c.Namespace(namespace) + } else { + client = c + } + if name != "" { + // this will return a single object + obj, err := client.Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // Just return an empty interface when the object was not found. + // That way, users can use `if not (lookup ...)` in their templates. + return map[string]interface{}{}, nil + } + return map[string]interface{}{}, err + } + return obj.UnstructuredContent(), nil + } + // this will return a list + obj, err := client.List(context.Background(), metav1.ListOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // Just return an empty interface when the object was not found. + // That way, users can use `if not (lookup ...)` in their templates. 
+ return map[string]interface{}{}, nil + } + return map[string]interface{}{}, err + } + return obj.UnstructuredContent(), nil + } +} + +// getDynamicClientOnKind returns a dynamic client on an Unstructured type. This client can be further namespaced. +func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) { + gvk := schema.FromAPIVersionAndKind(apiversion, kind) + apiRes, err := getAPIResourceForGVK(gvk, config) + if err != nil { + log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) + } + gvr := schema.GroupVersionResource{ + Group: apiRes.Group, + Version: apiRes.Version, + Resource: apiRes.Name, + } + intf, err := dynamic.NewForConfig(config) + if err != nil { + log.Printf("[ERROR] unable to get dynamic client %s", err) + return nil, false, err + } + res := intf.Resource(gvr) + return res, apiRes.Namespaced, nil +} + +func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (metav1.APIResource, error) { + res := metav1.APIResource{} + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + log.Printf("[ERROR] unable to create discovery client %s", err) + return res, err + } + resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + return res, err + } + for _, resource := range resList.APIResources { + // if a resource contains a "/" it's referencing a subresource. we don't support suberesource for now. + if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") { + res = resource + res.Group = gvk.Group + res.Version = gvk.Version + break + } + } + return res, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 53256f776..cd0a41a7e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,13 +1,26 @@ # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 +# github.com/BurntSushi/toml v1.2.1 +## explicit; go 1.16 +github.com/BurntSushi/toml +github.com/BurntSushi/toml/internal +# github.com/Masterminds/goutils v1.1.1 +## explicit +github.com/Masterminds/goutils +# github.com/Masterminds/semver/v3 v3.2.0 +## explicit; go 1.18 +github.com/Masterminds/semver/v3 +# github.com/Masterminds/sprig/v3 v3.2.3 +## explicit; go 1.13 +github.com/Masterminds/sprig/v3 # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler # github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 ## explicit; go 1.16 github.com/antlr/antlr4/runtime/Go/antlr -# github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a -## explicit +# github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 +## explicit; go 1.12 github.com/asaskevich/govalidator # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 @@ -28,6 +41,9 @@ github.com/coreos/go-semver/semver ## explicit; go 1.12 github.com/coreos/go-systemd/v22/daemon github.com/coreos/go-systemd/v22/journal +# github.com/cyphar/filepath-securejoin v0.2.3 +## explicit; go 1.13 +github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -35,12 +51,15 @@ github.com/davecgh/go-spew/spew ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# 
github.com/evanphx/json-patch v4.12.0+incompatible +# github.com/evanphx/json-patch v5.6.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.6.0 ## explicit; go 1.12 github.com/evanphx/json-patch/v5 +# github.com/fatih/structs v1.1.0 +## explicit +github.com/fatih/structs # github.com/felixge/httpsnoop v1.0.3 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -73,6 +92,16 @@ github.com/go-openapi/swag # github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 ## explicit; go 1.13 github.com/go-task/slim-sprig +# github.com/gobwas/glob v0.2.3 +## explicit +github.com/gobwas/glob +github.com/gobwas/glob/compiler +github.com/gobwas/glob/match +github.com/gobwas/glob/syntax +github.com/gobwas/glob/syntax/ast +github.com/gobwas/glob/syntax/lexer +github.com/gobwas/glob/util/runes +github.com/gobwas/glob/util/strings # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/gogoproto @@ -142,6 +171,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities +# github.com/huandu/xstrings v1.3.3 +## explicit; go 1.12 +github.com/huandu/xstrings # github.com/imdario/mergo v0.3.12 ## explicit; go 1.13 github.com/imdario/mergo @@ -162,9 +194,15 @@ github.com/mailru/easyjson/jwriter # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/mitchellh/copystructure v1.2.0 +## explicit; go 1.15 +github.com/mitchellh/copystructure # github.com/mitchellh/mapstructure v1.4.1 ## explicit; go 1.14 github.com/mitchellh/mapstructure +# github.com/mitchellh/reflectwalk v1.0.2 +## explicit +github.com/mitchellh/reflectwalk # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -351,9 +389,15 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron +# github.com/shopspring/decimal v1.2.0 +## explicit; go 1.13 +github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus +# github.com/spf13/cast v1.4.1 +## explicit +github.com/spf13/cast # github.com/spf13/cobra v1.6.1 ## explicit; go 1.15 github.com/spf13/cobra @@ -367,6 +411,21 @@ github.com/stoewer/go-strcase ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require +# github.com/valyala/bytebufferpool v1.0.0 +## explicit +github.com/valyala/bytebufferpool +# github.com/valyala/fasttemplate v1.2.2 +## explicit; go 1.12 +github.com/valyala/fasttemplate +# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f +## explicit +github.com/xeipuuv/gojsonpointer +# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 +## explicit +github.com/xeipuuv/gojsonreference +# github.com/xeipuuv/gojsonschema v1.2.0 +## explicit +github.com/xeipuuv/gojsonschema # go.etcd.io/etcd/api/v3 v3.5.7 ## explicit; go 1.17 go.etcd.io/etcd/api/v3/authpb @@ -464,14 +523,18 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.1.0 +# golang.org/x/crypto v0.5.0 ## explicit; go 1.17 +golang.org/x/crypto/bcrypt +golang.org/x/crypto/blowfish golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/internal/alias 
golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/nacl/secretbox +golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa +golang.org/x/crypto/scrypt # golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 ## explicit; go 1.20 golang.org/x/exp/constraints @@ -661,6 +724,15 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 +# helm.sh/helm/v3 v3.11.1 +## explicit; go 1.17 +helm.sh/helm/v3/internal/ignore +helm.sh/helm/v3/internal/sympath +helm.sh/helm/v3/internal/version +helm.sh/helm/v3/pkg/chart +helm.sh/helm/v3/pkg/chart/loader +helm.sh/helm/v3/pkg/chartutil +helm.sh/helm/v3/pkg/engine # k8s.io/api v0.27.2 ## explicit; go 1.20 k8s.io/api/admission/v1 @@ -1334,6 +1406,26 @@ k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace +# open-cluster-management.io/addon-framework v0.7.1-0.20230626092851-963716af4eed +## explicit; go 1.19 +open-cluster-management.io/addon-framework/pkg/addonfactory +open-cluster-management.io/addon-framework/pkg/addonmanager +open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting +open-cluster-management.io/addon-framework/pkg/addonmanager/constants +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration +open-cluster-management.io/addon-framework/pkg/agent +open-cluster-management.io/addon-framework/pkg/assets +open-cluster-management.io/addon-framework/pkg/basecontroller/events +open-cluster-management.io/addon-framework/pkg/basecontroller/factory +open-cluster-management.io/addon-framework/pkg/index +open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration +open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner +open-cluster-management.io/addon-framework/pkg/utils # open-cluster-management.io/api v0.11.1-0.20230609103311-088e8fe86139 ## explicit; go 1.19 open-cluster-management.io/api/addon/v1alpha1 @@ -1401,6 +1493,7 @@ open-cluster-management.io/api/crdsv1beta1 open-cluster-management.io/api/feature open-cluster-management.io/api/operator/v1 open-cluster-management.io/api/utils/work/v1/workapplier +open-cluster-management.io/api/utils/work/v1/workbuilder open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 diff --git a/vendor/open-cluster-management.io/addon-framework/LICENSE b/vendor/open-cluster-management.io/addon-framework/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
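Editor's note: the addonfactory package vendored below exposes a small values pipeline: a getter fetches AddOnDeploymentConfig objects referenced by a ManagedClusterAddOn, transform funcs turn each object into chart Values, and the factory merges the results. A minimal consumer sketch follows, assuming a hypothetical addon name, embedded chart path, and addon client (none of which come from this diff):

// Sketch: wiring the vendored addonfactory helpers into an agent addon.
// Names marked hypothetical are illustrative assumptions only.
package example

import (
	"embed"

	"open-cluster-management.io/addon-framework/pkg/addonfactory"
	"open-cluster-management.io/addon-framework/pkg/agent"
	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
)

//go:embed manifests/chart
var chartFS embed.FS // hypothetical embedded Helm chart

func buildAgentAddon(addonClient addonv1alpha1client.Interface) (agent.AgentAddon, error) {
	return addonfactory.NewAgentAddonFactory("hello-world", chartFS, "manifests/chart").
		WithGetValuesFuncs(
			// Values come from AddOnDeploymentConfig objects listed in the
			// addon's ConfigReferences; later references win on conflict.
			addonfactory.GetAddOnDeploymentConfigValues(
				addonfactory.NewAddOnDeploymentConfigGetter(addonClient),
				addonfactory.ToAddOnDeploymentConfigValues,
			),
		).
		// BuildHelmAgentAddon is part of the vendored factory API (not shown
		// in this hunk).
		BuildHelmAgentAddon()
}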
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addondeploymentconfig.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addondeploymentconfig.go new file mode 100644 index 000000000..331eb5327 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addondeploymentconfig.go @@ -0,0 +1,301 @@ +package addonfactory + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/utils" +) + +// AddOnDeloymentConfigToValuesFunc transform the AddOnDeploymentConfig object into Values object +// The transformation logic depends on the definition of the addon template +// Deprecated: use AddOnDeploymentConfigToValuesFunc instead. +type AddOnDeloymentConfigToValuesFunc func(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) + +// AddOnDeloymentConfigGetter has a method to return a AddOnDeploymentConfig object +// Deprecated: use AddOnDeploymentConfigGetter instead. +type AddOnDeloymentConfigGetter interface { + Get(ctx context.Context, namespace, name string) (*addonapiv1alpha1.AddOnDeploymentConfig, error) +} + +type defaultAddOnDeploymentConfigGetter struct { + addonClient addonv1alpha1client.Interface +} + +func (g *defaultAddOnDeploymentConfigGetter) Get( + ctx context.Context, namespace, name string) (*addonapiv1alpha1.AddOnDeploymentConfig, error) { + return g.addonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get(ctx, name, metav1.GetOptions{}) +} + +// NewAddOnDeloymentConfigGetter returns a AddOnDeloymentConfigGetter with addon client +// Deprecated: use NewAddOnDeploymentConfigGetter instead. +func NewAddOnDeloymentConfigGetter(addonClient addonv1alpha1client.Interface) AddOnDeloymentConfigGetter { + return &defaultAddOnDeploymentConfigGetter{addonClient: addonClient} +} + +// GetAddOnDeloymentConfigValues uses AddOnDeloymentConfigGetter to get the AddOnDeploymentConfig object, then +// uses AddOnDeloymentConfigToValuesFunc to transform the AddOnDeploymentConfig object to Values object +// If there are multiple AddOnDeploymentConfig objects in the AddOn ConfigReferences, the big index object will +// override the one from small index +// Deprecated: use GetAddOnDeploymentConfigValues instead. 
+func GetAddOnDeloymentConfigValues( + getter AddOnDeloymentConfigGetter, toValuesFuncs ...AddOnDeloymentConfigToValuesFunc) GetValuesFunc { + return func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) { + var lastValues = Values{} + for _, config := range addon.Status.ConfigReferences { + if config.ConfigGroupResource.Group != utils.AddOnDeploymentConfigGVR.Group || + config.ConfigGroupResource.Resource != utils.AddOnDeploymentConfigGVR.Resource { + continue + } + + addOnDeploymentConfig, err := getter.Get(context.Background(), config.Namespace, config.Name) + if err != nil { + return nil, err + } + + for _, toValuesFunc := range toValuesFuncs { + values, err := toValuesFunc(*addOnDeploymentConfig) + if err != nil { + return nil, err + } + lastValues = MergeValues(lastValues, values) + } + } + + return lastValues, nil + } +} + +// ToAddOnDeloymentConfigValues transform the AddOnDeploymentConfig object into Values object that is a plain value map +// for example: the spec of one AddOnDeploymentConfig is: +// +// { +// customizedVariables: [{name: "Image", value: "img"}, {name: "ImagePullPolicy", value: "Always"}], +// nodePlacement: {nodeSelector: {"host": "ssd"}, tolerations: {"key": "test"}}, +// } +// +// after transformed, the key set of Values object will be: {"Image", "ImagePullPolicy", "NodeSelector", "Tolerations"} +// Deprecated: use ToAddOnDeploymentConfigValues instead. +func ToAddOnDeloymentConfigValues(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) { + values, err := ToAddOnCustomizedVariableValues(config) + if err != nil { + return nil, err + } + + if config.Spec.NodePlacement != nil { + values["NodeSelector"] = config.Spec.NodePlacement.NodeSelector + values["Tolerations"] = config.Spec.NodePlacement.Tolerations + } + + return values, nil +} + +// ToAddOnNodePlacementValues only transform the AddOnDeploymentConfig NodePlacement part into Values object that has +// a specific for helm chart values +// for example: the spec of one AddOnDeploymentConfig is: +// +// { +// nodePlacement: {nodeSelector: {"host": "ssd"}, tolerations: {"key":"test"}}, +// } +// +// after transformed, the Values will be: +// map[global:map[nodeSelector:map[host:ssd]] tolerations:[map[key:test]]] +func ToAddOnNodePlacementValues(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) { + if config.Spec.NodePlacement == nil { + return nil, nil + } + + type global struct { + NodeSelector map[string]string `json:"nodeSelector"` + } + + jsonStruct := struct { + Tolerations []corev1.Toleration `json:"tolerations"` + Global global `json:"global"` + }{ + Tolerations: config.Spec.NodePlacement.Tolerations, + Global: global{ + NodeSelector: config.Spec.NodePlacement.NodeSelector, + }, + } + + values, err := JsonStructToValues(jsonStruct) + if err != nil { + return nil, err + } + + return values, nil +} + +// ToAddOnCustomizedVariableValues only transform the CustomizedVariables in the spec of AddOnDeploymentConfig into Values object. 
+// for example: the spec of one AddOnDeploymentConfig is: +// +// { +// customizedVariables: [{name: "a", value: "x"}, {name: "b", value: "y"}], +// } +// +// after transformed, the Values will be: +// map[a:x b:y] +func ToAddOnCustomizedVariableValues(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) { + values := Values{} + for _, variable := range config.Spec.CustomizedVariables { + values[variable.Name] = variable.Value + } + + return values, nil +} + +// AddOnDeploymentConfigToValuesFunc transform the AddOnDeploymentConfig object into Values object +// The transformation logic depends on the definition of the addon template +type AddOnDeploymentConfigToValuesFunc func(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) + +// AddOnDeploymentConfigGetter has a method to return a AddOnDeploymentConfig object +type AddOnDeploymentConfigGetter interface { + Get(ctx context.Context, namespace, name string) (*addonapiv1alpha1.AddOnDeploymentConfig, error) +} + +// NewAddOnDeploymentConfigGetter returns a AddOnDeploymentConfigGetter with addon client +func NewAddOnDeploymentConfigGetter(addonClient addonv1alpha1client.Interface) AddOnDeploymentConfigGetter { + return &defaultAddOnDeploymentConfigGetter{addonClient: addonClient} +} + +// GetAddOnDeploymentConfigValues uses AddOnDeploymentConfigGetter to get the AddOnDeploymentConfig object, then +// uses AddOnDeploymentConfigToValuesFunc to transform the AddOnDeploymentConfig object to Values object +// If there are multiple AddOnDeploymentConfig objects in the AddOn ConfigReferences, the big index object will +// override the one from small index +func GetAddOnDeploymentConfigValues( + getter AddOnDeploymentConfigGetter, toValuesFuncs ...AddOnDeploymentConfigToValuesFunc) GetValuesFunc { + return func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) { + var lastValues = Values{} + for _, config := range addon.Status.ConfigReferences { + if config.ConfigGroupResource.Group != utils.AddOnDeploymentConfigGVR.Group || + config.ConfigGroupResource.Resource != utils.AddOnDeploymentConfigGVR.Resource { + continue + } + + addOnDeploymentConfig, err := getter.Get(context.Background(), config.Namespace, config.Name) + if err != nil { + return nil, err + } + + for _, toValuesFunc := range toValuesFuncs { + values, err := toValuesFunc(*addOnDeploymentConfig) + if err != nil { + return nil, err + } + lastValues = MergeValues(lastValues, values) + } + } + + return lastValues, nil + } +} + +// ToAddOnDeploymentConfigValues transform the AddOnDeploymentConfig object into Values object that is a plain value map +// for example: the spec of one AddOnDeploymentConfig is: +// +// { +// customizedVariables: [{name: "Image", value: "img"}, {name: "ImagePullPolicy", value: "Always"}], +// nodePlacement: {nodeSelector: {"host": "ssd"}, tolerations: {"key": "test"}}, +// } +// +// after transformed, the key set of Values object will be: {"Image", "ImagePullPolicy", "NodeSelector", "Tolerations"} +func ToAddOnDeploymentConfigValues(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) { + values, err := ToAddOnCustomizedVariableValues(config) + if err != nil { + return nil, err + } + + if config.Spec.NodePlacement != nil { + values["NodeSelector"] = config.Spec.NodePlacement.NodeSelector + values["Tolerations"] = config.Spec.NodePlacement.Tolerations + } + + return values, nil +} + +// ToImageOverrideValuesFunc return a func that can use the AddOnDeploymentConfig.spec.Registries to 
+
+// ToImageOverrideValuesFunc returns a func that uses the AddOnDeploymentConfig.spec.Registries to override an image,
+// then returns the overridden value under the key imageKey.
+//
+// for example: the spec of one AddOnDeploymentConfig is:
+// { registries: [{source: "quay.io/open-cluster-management/addon-agent", mirror: "quay.io/ocm/addon-agent"}]}
+// the imageKey is "helloWorldImage", the image is "quay.io/open-cluster-management/addon-agent:v1"
+// after the transformation, the Values object will be: {"helloWorldImage": "quay.io/ocm/addon-agent:v1"}
+//
+// Note: imageKey supports nested keys, for example: "global.imageOverrides.helloWorldImage"; the output
+// will be: {"global": {"imageOverrides": {"helloWorldImage": "quay.io/ocm/addon-agent:v1"}}}
+func ToImageOverrideValuesFunc(imageKey, image string) AddOnDeploymentConfigToValuesFunc {
+	return func(config addonapiv1alpha1.AddOnDeploymentConfig) (Values, error) {
+		if len(imageKey) == 0 {
+			return nil, fmt.Errorf("imageKey is empty")
+		}
+		if len(image) == 0 {
+			return nil, fmt.Errorf("image is empty")
+		}
+
+		nestedMap := make(map[string]interface{})
+
+		keys := strings.Split(imageKey, ".")
+		currentMap := nestedMap
+
+		for i := 0; i < len(keys)-1; i++ {
+			key := keys[i]
+			nextMap := make(map[string]interface{})
+			currentMap[key] = nextMap
+			currentMap = nextMap
+		}
+
+		lastKey := keys[len(keys)-1]
+		currentMap[lastKey] = image
+
+		if config.Spec.Registries != nil {
+			currentMap[lastKey] = OverrideImage(config.Spec.Registries, image)
+		}
+
+		return nestedMap, nil
+	}
+}
+
+// OverrideImage checks whether a source configured in the registries matches the imageName; if so, the mirror value
+// from the registries is used to override the imageName
+func OverrideImage(registries []addonapiv1alpha1.ImageMirror, imageName string) string {
+	if len(registries) == 0 {
+		return imageName
+	}
+	overrideImageName := imageName
+	for i := 0; i < len(registries); i++ {
+		registry := registries[i]
+		name := overrideImageDirectly(registry.Source, registry.Mirror, imageName)
+		if name != imageName {
+			overrideImageName = name
+		}
+	}
+	return overrideImageName
+}
+
+func overrideImageDirectly(source, mirror, imageName string) string {
+	source = strings.TrimSuffix(source, "/")
+	mirror = strings.TrimSuffix(mirror, "/")
+	imageSegments := strings.Split(imageName, "/")
+	imageNameTag := imageSegments[len(imageSegments)-1]
+	if source == "" {
+		if mirror == "" {
+			return imageNameTag
+		}
+		return fmt.Sprintf("%s/%s", mirror, imageNameTag)
+	}
+
+	if !strings.HasPrefix(imageName, source) {
+		return imageName
+	}
+
+	trimSegment := strings.TrimPrefix(imageName, source)
+	return fmt.Sprintf("%s%s", mirror, trimSegment)
+}
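+
+// An illustrative override (the registry and image values are assumed):
+//
+//	registries := []addonapiv1alpha1.ImageMirror{
+//		{Source: "quay.io/open-cluster-management", Mirror: "quay.io/ocm"},
+//	}
+//	OverrideImage(registries, "quay.io/open-cluster-management/addon-agent:v1")
+//	// returns "quay.io/ocm/addon-agent:v1"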
"open-cluster-management-agent-addon" + +// AnnotationValuesName is the annotation Name of customized values +const AnnotationValuesName string = "addon.open-cluster-management.io/values" + +type Values map[string]interface{} + +type GetValuesFunc func(cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) + +// AgentAddonFactory includes the common fields for building different agentAddon instances. +type AgentAddonFactory struct { + scheme *runtime.Scheme + fs embed.FS + dir string + getValuesFuncs []GetValuesFunc + agentAddonOptions agent.AgentAddonOptions + // trimCRDDescription flag is used to trim the description of CRDs in manifestWork. disabled by default. + trimCRDDescription bool + hostingCluster *clusterv1.ManagedCluster +} + +// NewAgentAddonFactory builds an addonAgentFactory instance with addon name and fs. +// dir is the path prefix based on the fs path. +func NewAgentAddonFactory(addonName string, fs embed.FS, dir string) *AgentAddonFactory { + s := runtime.NewScheme() + _ = scheme.AddToScheme(s) + _ = apiextensionsv1.AddToScheme(s) + _ = apiextensionsv1beta1.AddToScheme(s) + + return &AgentAddonFactory{ + fs: fs, + dir: dir, + agentAddonOptions: agent.AgentAddonOptions{ + AddonName: addonName, + Registration: nil, + InstallStrategy: nil, + HealthProber: nil, + SupportedConfigGVRs: []schema.GroupVersionResource{}, + }, + trimCRDDescription: false, + scheme: s, + } +} + +// WithScheme is an optional configuration, only used when the agentAddon has customized resource types. +func (f *AgentAddonFactory) WithScheme(s *runtime.Scheme) *AgentAddonFactory { + f.scheme = s + _ = scheme.AddToScheme(f.scheme) + _ = apiextensionsv1.AddToScheme(f.scheme) + _ = apiextensionsv1beta1.AddToScheme(f.scheme) + return f +} + +// WithGetValuesFuncs adds a list of the getValues func. +// the values got from the big index Func will override the one from small index Func. +func (f *AgentAddonFactory) WithGetValuesFuncs(getValuesFuncs ...GetValuesFunc) *AgentAddonFactory { + f.getValuesFuncs = getValuesFuncs + return f +} + +// WithInstallStrategy defines the installation strategy of the manifests prescribed by Manifests(..). +func (f *AgentAddonFactory) WithInstallStrategy(strategy *agent.InstallStrategy) *AgentAddonFactory { + if strategy.InstallNamespace == "" { + strategy.InstallNamespace = AddonDefaultInstallNamespace + } + f.agentAddonOptions.InstallStrategy = strategy + + return f +} + +// WithAgentRegistrationOption defines how agent is registered to the hub cluster. +func (f *AgentAddonFactory) WithAgentRegistrationOption(option *agent.RegistrationOption) *AgentAddonFactory { + f.agentAddonOptions.Registration = option + return f +} + +// WithAgentHealthProber defines how is the healthiness status of the ManagedClusterAddon probed. +func (f *AgentAddonFactory) WithAgentHealthProber(prober *agent.HealthProber) *AgentAddonFactory { + f.agentAddonOptions.HealthProber = prober + return f +} + +// WithAgentHostedModeEnabledOption will enable the agent hosted deploying mode. +func (f *AgentAddonFactory) WithAgentHostedModeEnabledOption() *AgentAddonFactory { + f.agentAddonOptions.HostedModeEnabled = true + return f +} + +// WithTrimCRDDescription is to enable trim the description of CRDs in manifestWork. 
+
+// WithTrimCRDDescription enables trimming the descriptions of CRDs in manifestWork.
+func (f *AgentAddonFactory) WithTrimCRDDescription() *AgentAddonFactory {
+	f.trimCRDDescription = true
+	return f
+}
+
+// WithConfigGVRs defines the configuration GroupVersionResources supported by the addon
+func (f *AgentAddonFactory) WithConfigGVRs(gvrs ...schema.GroupVersionResource) *AgentAddonFactory {
+	f.agentAddonOptions.SupportedConfigGVRs = append(f.agentAddonOptions.SupportedConfigGVRs, gvrs...)
+	return f
+}
+
+// WithHostingCluster defines the hosting cluster used in hosted mode. An AgentAddon may use this to provide
+// additional metadata.
+func (f *AgentAddonFactory) WithHostingCluster(cluster *clusterv1.ManagedCluster) *AgentAddonFactory {
+	f.hostingCluster = cluster
+	return f
+}
+
+// BuildHelmAgentAddon builds a helm agentAddon instance.
+func (f *AgentAddonFactory) BuildHelmAgentAddon() (agent.AgentAddon, error) {
+	if err := validateSupportedConfigGVRs(f.agentAddonOptions.SupportedConfigGVRs); err != nil {
+		return nil, err
+	}
+
+	userChart, err := loadChart(f.fs, f.dir)
+	if err != nil {
+		return nil, err
+	}
+
+	agentAddon := newHelmAgentAddon(f, userChart)
+
+	return agentAddon, nil
+}
+
+// BuildTemplateAgentAddon builds a template agentAddon instance.
+func (f *AgentAddonFactory) BuildTemplateAgentAddon() (agent.AgentAddon, error) {
+	if err := validateSupportedConfigGVRs(f.agentAddonOptions.SupportedConfigGVRs); err != nil {
+		return nil, err
+	}
+
+	templateFiles, err := getTemplateFiles(f.fs, f.dir)
+	if err != nil {
+		klog.Errorf("failed to get template files. %v", err)
+		return nil, err
+	}
+	if len(templateFiles) == 0 {
+		return nil, fmt.Errorf("there are no template files")
+	}
+
+	agentAddon := newTemplateAgentAddon(f)
+
+	for _, file := range templateFiles {
+		template, err := f.fs.ReadFile(file)
+		if err != nil {
+			return nil, err
+		}
+		agentAddon.addTemplateData(file, template)
+	}
+	return agentAddon, nil
+}
+
+func validateSupportedConfigGVRs(configGVRs []schema.GroupVersionResource) error {
+	if len(configGVRs) == 0 {
+		// no configs required, ignore
+		return nil
+	}
+
+	configGVRMap := map[schema.GroupVersionResource]bool{}
+	for index, gvr := range configGVRs {
+		if gvr.Empty() {
+			return fmt.Errorf("config type is empty, index=%d", index)
+		}
+
+		if gvr.Version == "" {
+			return fmt.Errorf("config version is required, index=%d", index)
+		}
+
+		if gvr.Resource == "" {
+			return fmt.Errorf("config resource is required, index=%d", index)
+		}
+
+		if _, existed := configGVRMap[gvr]; existed {
+			return fmt.Errorf("config type %q is duplicated", gvr.String())
+		}
+		configGVRMap[gvr] = true
+	}
+
+	return nil
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go
new file mode 100644
index 000000000..b3d3f2abe
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go
@@ -0,0 +1,252 @@
+package addonfactory
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/engine"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/klog/v2"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/constants"
+	"open-cluster-management.io/addon-framework/pkg/agent"
+)
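+
+// For reference, a chart template could consume the built-in values defined
+// below as in this assumed snippet:
+//
+//	metadata:
+//	  namespace: {{ .Values.addonInstallNamespace }}
+//	  labels:
+//	    cluster: {{ .Values.clusterName }}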
+
+// helmBuiltinValues includes the built-in values for the helm agentAddon.
+// the values in a helm chart should begin with a lowercase letter, so we need to convert them to Values with JsonStructToValues.
+// the built-in values cannot be overridden by getValuesFuncs
+type helmBuiltinValues struct {
+	ClusterName             string `json:"clusterName"`
+	AddonInstallNamespace   string `json:"addonInstallNamespace"`
+	HubKubeConfigSecret     string `json:"hubKubeConfigSecret,omitempty"`
+	ManagedKubeConfigSecret string `json:"managedKubeConfigSecret,omitempty"`
+	InstallMode             string `json:"installMode"`
+}
+
+// helmDefaultValues includes the default values for the helm agentAddon.
+// the values in a helm chart should begin with a lowercase letter, so we need to convert them to Values with JsonStructToValues.
+// the default values can be overridden by getValuesFuncs
+type helmDefaultValues struct {
+	HubKubeConfigSecret        string                 `json:"hubKubeConfigSecret,omitempty"`
+	ManagedKubeConfigSecret    string                 `json:"managedKubeConfigSecret,omitempty"`
+	HostingClusterCapabilities chartutil.Capabilities `json:"hostingClusterCapabilities,omitempty"`
+}
+
+type HelmAgentAddon struct {
+	decoder            runtime.Decoder
+	chart              *chart.Chart
+	getValuesFuncs     []GetValuesFunc
+	agentAddonOptions  agent.AgentAddonOptions
+	trimCRDDescription bool
+	hostingCluster     *clusterv1.ManagedCluster
+}
+
+func newHelmAgentAddon(factory *AgentAddonFactory, chart *chart.Chart) *HelmAgentAddon {
+	return &HelmAgentAddon{
+		decoder:            serializer.NewCodecFactory(factory.scheme).UniversalDeserializer(),
+		chart:              chart,
+		getValuesFuncs:     factory.getValuesFuncs,
+		agentAddonOptions:  factory.agentAddonOptions,
+		trimCRDDescription: factory.trimCRDDescription,
+		hostingCluster:     factory.hostingCluster,
+	}
+}
+
+func (a *HelmAgentAddon) Manifests(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) {
+	var objects []runtime.Object
+
+	values, err := a.getValues(cluster, addon)
+	if err != nil {
+		return objects, err
+	}
+
+	helmEngine := engine.Engine{
+		Strict:   true,
+		LintMode: false,
+	}
+
+	crds := a.chart.CRDObjects()
+	for _, crd := range crds {
+		klog.V(4).Infof("%v\n", crd.File.Data)
+		object, _, err := a.decoder.Decode(crd.File.Data, nil, nil)
+		if err != nil {
+			return nil, err
+		}
+		objects = append(objects, object)
+	}
+
+	templates, err := helmEngine.Render(a.chart, values)
+	if err != nil {
+		return objects, err
+	}
+
+	// sort the filenames of the templates so the manifests are ordered consistently
+	keys := make([]string, 0, len(templates))
+	for k := range templates {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		data := templates[k]
+
+		if len(data) == 0 {
+			continue
+		}
+		klog.V(4).Infof("rendered template: %v", data)
+
+		yamlReader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(data)))
+		for {
+			b, err := yamlReader.Read()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return nil, err
+			}
+			if len(b) != 0 {
+				object, _, err := a.decoder.Decode(b, nil, nil)
+				if err != nil {
+					// In some conditions, resources will be provided by other hub-side components.
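+					// For instance, a rendered manifest that only carries metadata
+					// and omits "kind" (a hypothetical snippet like the one below)
+					// decodes with a missing-kind error and is skipped rather than
+					// failing the whole render:
+					//
+					//	metadata:
+					//	  labels:
+					//	    foo: bar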
+					// Example case: https://github.com/open-cluster-management-io/addon-framework/pull/72
+					if runtime.IsMissingKind(err) {
+						klog.V(4).Infof("Skipping template %v, reason: %v", k, err)
+						continue
+					}
+					return nil, err
+				}
+				objects = append(objects, object)
+			}
+		}
+
+	}
+
+	if a.trimCRDDescription {
+		objects = trimCRDDescription(objects)
+	}
+	return objects, nil
+}
+
+func (a *HelmAgentAddon) GetAgentAddonOptions() agent.AgentAddonOptions {
+	return a.agentAddonOptions
+}
+
+func (a *HelmAgentAddon) getValues(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) (chartutil.Values, error) {
+	overrideValues := map[string]interface{}{}
+
+	defaultValues, err := a.getDefaultValues(cluster, addon)
+	if err != nil {
+		klog.Errorf("failed to get defaultValue. err:%v", err)
+		return nil, err
+	}
+	overrideValues = MergeValues(overrideValues, defaultValues)
+
+	for i := 0; i < len(a.getValuesFuncs); i++ {
+		if a.getValuesFuncs[i] != nil {
+			userValues, err := a.getValuesFuncs[i](cluster, addon)
+			if err != nil {
+				return overrideValues, err
+			}
+
+			klog.V(4).Infof("index=%d, user values: %v", i, userValues)
+			overrideValues = MergeValues(overrideValues, userValues)
+			klog.V(4).Infof("index=%d, override values: %v", i, overrideValues)
+		}
+	}
+
+	builtinValues, err := a.getBuiltinValues(cluster, addon)
+	if err != nil {
+		klog.Errorf("failed to get builtinValue. err:%v", err)
+		return nil, err
+	}
+
+	overrideValues = MergeValues(overrideValues, builtinValues)
+
+	values, err := chartutil.ToRenderValues(a.chart, overrideValues,
+		a.releaseOptions(cluster, addon), a.capabilities(cluster, addon))
+	if err != nil {
+		klog.Errorf("failed to render helm chart with values %v. err:%v", overrideValues, err)
+		return values, err
+	}
+
+	return values, nil
+}
+
+func (a *HelmAgentAddon) getBuiltinValues(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) {
+	builtinValues := helmBuiltinValues{}
+	builtinValues.ClusterName = cluster.GetName()
+
+	installNamespace := addon.Spec.InstallNamespace
+	if len(installNamespace) == 0 {
+		installNamespace = AddonDefaultInstallNamespace
+	}
+	builtinValues.AddonInstallNamespace = installNamespace
+
+	builtinValues.InstallMode, _ = constants.GetHostedModeInfo(addon.GetAnnotations())
+
+	helmBuiltinValues, err := JsonStructToValues(builtinValues)
+	if err != nil {
+		klog.Errorf("failed to convert builtinValues to values %v. err:%v", builtinValues, err)
+		return nil, err
+	}
+	return helmBuiltinValues, nil
+}
+
+func (a *HelmAgentAddon) getDefaultValues(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) {
+	defaultValues := helmDefaultValues{}
+
+	// TODO: hubKubeConfigSecret depends on the signer configuration in registration, and the registration is an array.
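+	// For an addon named "helloworld" (an assumed name, with the
+	// ManagedClusterAddOn also named "helloworld"), the defaults below
+	// resolve to:
+	//
+	//	hubKubeConfigSecret:     "helloworld-hub-kubeconfig"
+	//	managedKubeConfigSecret: "helloworld-managed-kubeconfig"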
+	if a.agentAddonOptions.Registration != nil {
+		defaultValues.HubKubeConfigSecret = fmt.Sprintf("%s-hub-kubeconfig", a.agentAddonOptions.AddonName)
+	}
+
+	defaultValues.ManagedKubeConfigSecret = fmt.Sprintf("%s-managed-kubeconfig", addon.Name)
+
+	if a.hostingCluster != nil {
+		defaultValues.HostingClusterCapabilities = *a.capabilities(a.hostingCluster, addon)
+	}
+
+	helmDefaultValues, err := JsonStructToValues(defaultValues)
+	if err != nil {
+		klog.Errorf("failed to convert defaultValues to values %v. err:%v", defaultValues, err)
+		return nil, err
+	}
+	return helmDefaultValues, nil
+}
+
+// only Capabilities.KubeVersion is supported
+func (a *HelmAgentAddon) capabilities(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) *chartutil.Capabilities {
+	return &chartutil.Capabilities{
+		KubeVersion: chartutil.KubeVersion{Version: cluster.Status.Version.Kubernetes},
+	}
+}
+
+// only Release.Name and Release.Namespace are supported
+func (a *HelmAgentAddon) releaseOptions(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) chartutil.ReleaseOptions {
+	installNamespace := addon.Spec.InstallNamespace
+	if len(installNamespace) == 0 {
+		installNamespace = AddonDefaultInstallNamespace
+	}
+	return chartutil.ReleaseOptions{Name: a.agentAddonOptions.AddonName, Namespace: installNamespace}
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helper.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helper.go
new file mode 100644
index 000000000..0adafbe39
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helper.go
@@ -0,0 +1,174 @@
+package addonfactory
+
+import (
+	"embed"
+	"encoding/json"
+	"io/fs"
+	"path/filepath"
+	"strings"
+
+	"github.com/fatih/structs"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+
+	"k8s.io/klog/v2"
+
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+)
+
+// GetValuesFromAddonAnnotation gets the values from the annotation of the addon CR.
+// the key of the annotation is `addon.open-cluster-management.io/values`, the value is a json string containing the values.
+// for example: "addon.open-cluster-management.io/values": `{"NodeSelector":{"host":"ssd"},"Image":"quay.io/helloworld:2.4"}`
+func GetValuesFromAddonAnnotation(
+	cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) {
+	values := map[string]interface{}{}
+	annotations := addon.GetAnnotations()
+	if len(annotations[AnnotationValuesName]) == 0 {
+		return values, nil
+	}
+
+	err := json.Unmarshal([]byte(annotations[AnnotationValuesName]), &values)
+	if err != nil {
+		return values, err
+	}
+
+	return values, nil
+}
+
+// MergeValues merges the two given Values into one.
+// the values of b override those in a for the same fields.
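+// An illustrative merge (the inputs are assumed):
+//
+//	a := Values{"k": "v1", "global": map[string]interface{}{"x": 1}}
+//	b := Values{"k": "v2", "global": map[string]interface{}{"y": 2}}
+//	MergeValues(a, b) // map[global:map[x:1 y:2] k:v2]: nested maps merge, b wins on conflicts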
+func MergeValues(a, b Values) Values { + out := Values{} + for k, v := range a { + out[k] = v + } + for bk, bv := range b { + if bv, ok := bv.(map[string]interface{}); ok { + if av, ok := out[bk]; ok { + if av, ok := av.(map[string]interface{}); ok { + out[bk] = mergeInterfaceMaps(av, bv) + continue + } + } + } + out[bk] = bv + } + return out +} + +// StructToValues converts the given struct to a Values +func StructToValues(a interface{}) Values { + return structs.Map(a) +} + +// JsonStructToValues converts the given json struct to a Values +func JsonStructToValues(a interface{}) (Values, error) { + raw, err := json.Marshal(a) + if err != nil { + return nil, err + } + v := Values{} + + err = json.Unmarshal(raw, &v) + if err != nil { + return nil, err + } + return v, nil +} + +func loadChart(chartFS embed.FS, chartPrefix string) (*chart.Chart, error) { + files, err := getFiles(chartFS) + if err != nil { + return nil, err + } + + var bfs []*loader.BufferedFile + for _, fileName := range files { + b, err := fs.ReadFile(chartFS, fileName) + if err != nil { + klog.Errorf("failed to read file %v. err:%v", fileName, err) + return nil, err + } + if !strings.HasPrefix(fileName, chartPrefix) { + continue + } + bf := &loader.BufferedFile{ + Name: stripPrefix(chartPrefix, fileName), + Data: b, + } + bfs = append(bfs, bf) + } + + userChart, err := loader.LoadFiles(bfs) + if err != nil { + klog.Errorf("failed to load chart. err:%v", err) + return nil, err + } + return userChart, nil +} + +func getTemplateFiles(templateFS embed.FS, dir string) ([]string, error) { + files, err := getFiles(templateFS) + if err != nil { + return nil, err + } + if dir == "." || len(dir) == 0 { + return files, nil + } + + var templateFiles []string + for _, f := range files { + if strings.HasPrefix(f, dir) { + templateFiles = append(templateFiles, f) + } + + } + return templateFiles, nil +} + +func getFiles(manifestFS embed.FS) ([]string, error) { + var res []string + err := fs.WalkDir(manifestFS, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + res = append(res, path) + return nil + }) + return res, err +} + +func stripPrefix(chartPrefix, path string) string { + prefixNoPathSeparatorSuffix := strings.TrimSuffix(chartPrefix, string(filepath.Separator)) + chartPrefixLen := len(strings.Split(prefixNoPathSeparatorSuffix, string(filepath.Separator))) + pathValues := strings.Split(path, string(filepath.Separator)) + return strings.Join(pathValues[chartPrefixLen:], string(filepath.Separator)) +} + +func mergeInterfaceMaps(a, b map[string]interface{}) map[string]interface{} { + out := map[string]interface{}{} + for k, v := range a { + out[k] = v + } + + for bk, bv := range b { + if bv, ok := bv.(map[string]interface{}); ok { + if av, ok := out[bk]; ok { + if av, ok := av.(map[string]interface{}); ok { + out[bk] = mergeInterfaceMaps(av, bv) + continue + } + } + } + + out[bk] = bv + } + + return out +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go new file mode 100644 index 000000000..5cc864c04 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go @@ -0,0 +1,150 @@ +package addonfactory + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/klog/v2" + addonapiv1alpha1 
"open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/assets" +) + +// templateBuiltinValues includes the built-in values for template agentAddon. +// the values for template config should begin with an uppercase letter, so we need convert it to Values by StructToValues. +// the built-in values can not be overrided by getValuesFuncs +type templateBuiltinValues struct { + ClusterName string + AddonInstallNamespace string + InstallMode string +} + +// templateDefaultValues includes the default values for template agentAddon. +// the values for template config should begin with an uppercase letter, so we need convert it to Values by StructToValues. +// the default values can be overrided by getValuesFuncs +type templateDefaultValues struct { + HubKubeConfigSecret string + ManagedKubeConfigSecret string +} + +type templateFile struct { + name string + content []byte +} + +type TemplateAgentAddon struct { + decoder runtime.Decoder + templateFiles []templateFile + getValuesFuncs []GetValuesFunc + agentAddonOptions agent.AgentAddonOptions + trimCRDDescription bool +} + +func newTemplateAgentAddon(factory *AgentAddonFactory) *TemplateAgentAddon { + return &TemplateAgentAddon{ + decoder: serializer.NewCodecFactory(factory.scheme).UniversalDeserializer(), + getValuesFuncs: factory.getValuesFuncs, + agentAddonOptions: factory.agentAddonOptions, + trimCRDDescription: factory.trimCRDDescription, + } +} + +func (a *TemplateAgentAddon) Manifests( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + var objects []runtime.Object + + configValues, err := a.getValues(cluster, addon) + if err != nil { + return objects, err + } + + for _, file := range a.templateFiles { + if len(file.content) == 0 { + continue + } + klog.V(4).Infof("rendered template: %v", file.content) + raw := assets.MustCreateAssetFromTemplate(file.name, file.content, configValues).Data + object, _, err := a.decoder.Decode(raw, nil, nil) + if err != nil { + if runtime.IsMissingKind(err) { + klog.V(4).Infof("Skipping template %v, reason: %v", file.name, err) + continue + } + return nil, err + } + objects = append(objects, object) + } + + if a.trimCRDDescription { + objects = trimCRDDescription(objects) + } + return objects, nil +} + +func (a *TemplateAgentAddon) GetAgentAddonOptions() agent.AgentAddonOptions { + return a.agentAddonOptions +} + +func (a *TemplateAgentAddon) getValues( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) { + overrideValues := map[string]interface{}{} + + defaultValues := a.getDefaultValues(cluster, addon) + overrideValues = MergeValues(overrideValues, defaultValues) + + for i := 0; i < len(a.getValuesFuncs); i++ { + if a.getValuesFuncs[i] != nil { + userValues, err := a.getValuesFuncs[i](cluster, addon) + if err != nil { + return overrideValues, err + } + overrideValues = MergeValues(overrideValues, userValues) + } + } + builtinValues := a.getBuiltinValues(cluster, addon) + overrideValues = MergeValues(overrideValues, builtinValues) + + return overrideValues, nil +} + +func (a *TemplateAgentAddon) getBuiltinValues( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) Values { + builtinValues := templateBuiltinValues{} + builtinValues.ClusterName 
= cluster.GetName() + + installNamespace := addon.Spec.InstallNamespace + if len(installNamespace) == 0 { + installNamespace = AddonDefaultInstallNamespace + } + builtinValues.AddonInstallNamespace = installNamespace + + builtinValues.InstallMode, _ = constants.GetHostedModeInfo(addon.GetAnnotations()) + + return StructToValues(builtinValues) +} + +func (a *TemplateAgentAddon) getDefaultValues( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) Values { + defaultValues := templateDefaultValues{} + + // TODO: hubKubeConfigSecret depends on the signer configuration in registration, and the registration is an array. + if a.agentAddonOptions.Registration != nil { + defaultValues.HubKubeConfigSecret = fmt.Sprintf("%s-hub-kubeconfig", a.agentAddonOptions.AddonName) + } + + defaultValues.ManagedKubeConfigSecret = fmt.Sprintf("%s-managed-kubeconfig", addon.Name) + + return StructToValues(defaultValues) +} + +func (a *TemplateAgentAddon) addTemplateData(file string, data []byte) { + a.templateFiles = append(a.templateFiles, templateFile{name: file, content: data}) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/test_helper.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/test_helper.go new file mode 100644 index 000000000..942814163 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/test_helper.go @@ -0,0 +1,31 @@ +package addonfactory + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func NewFakeManagedCluster(name string, k8sVersion string) *clusterv1.ManagedCluster { + return &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: clusterv1.ManagedClusterSpec{}, + Status: clusterv1.ManagedClusterStatus{Version: clusterv1.ManagedClusterVersion{Kubernetes: k8sVersion}}, + } +} + +func NewFakeManagedClusterAddon(name, clusterName, installNamespace, values string) *addonapiv1alpha1.ManagedClusterAddOn { + return &addonapiv1alpha1.ManagedClusterAddOn{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: clusterName, + Annotations: map[string]string{ + AnnotationValuesName: values, + }, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{InstallNamespace: installNamespace}, + } +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/Chart.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/Chart.yaml new file mode 100644 index 000000000..198f05e66 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +description: A Helm chart for test +name: test +version: 2.2.0 +appVersion: "2.2.0" diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml new file mode 100644 index 000000000..55e70e1b8 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml @@ -0,0 +1,195 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: test.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: 
ClusterClaim + listKind: ClusterClaimList + plural: clusterclaims + singular: clusterclaim + scope: Cluster + preserveUnknownFields: false + versions: + - additionalPrinterColumns: + - jsonPath: .spec.addOnMeta.displayName + name: DISPLAY NAME + type: string + - jsonPath: .spec.addOnConfiguration.crdName + name: CRD NAME + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterManagementAddOn represents the registration of an add-on to the cluster manager. This resource allows the user to discover which add-on is available for the cluster manager and also provides metadata information about the add-on. This resource also provides a linkage to ManagedClusterAddOn, the name of the ClusterManagementAddOn resource will be used for the namespace-scoped ManagedClusterAddOn resource. ClusterManagementAddOn is a cluster-scoped resource. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec represents a desired configuration for the agent on the cluster management add-on. + type: object + properties: + addOnConfiguration: + description: 'Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.' + type: object + properties: + crName: + description: crName is the name of the CR used to configure instances of the managed add-on. This field should be configured if add-on CR have a consistent name across the all of the ManagedCluster instaces. + type: string + crdName: + description: crdName is the name of the CRD used to configure instances of the managed add-on. This field should be configured if the add-on have a CRD that controls the configuration of the add-on. + type: string + lastObservedGeneration: + description: lastObservedGeneration is the observed generation of the custom resource for the configuration of the addon. + type: integer + format: int64 + addOnMeta: + description: addOnMeta is a reference to the metadata information for the add-on. + type: object + properties: + description: + description: description represents the detailed description of the add-on. + type: string + displayName: + description: displayName represents the name of add-on that will be displayed. + type: string + installStrategy: + description: InstallStrategy represents that related ManagedClusterAddOns should be installed on certain clusters. + type: object + default: + type: Manual + properties: + placements: + description: Placements is a list of placement references honored when install strategy type is Placements. 
All clusters selected by these placements will install the addon If one cluster belongs to multiple placements, it will only apply the strategy defined later in the order. That is to say, The latter strategy overrides the previous one. + type: array + items: + type: object + required: + - name + - namespace + properties: + configs: + description: Configs is the configuration of managedClusterAddon during installation. User can override the configuration by updating the managedClusterAddon directly. + type: array + items: + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + default: "" + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + name: + description: Name is the name of the placement + type: string + minLength: 1 + namespace: + description: Namespace is the namespace of the placement + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + type: + description: 'Type is the type of the install strategy, it can be: - Manual: no automatic install - Placements: install to clusters selected by placements.' + type: string + default: Manual + enum: + - Manual + - Placements + supportedConfigs: + description: supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. The default is an empty list + type: array + items: + description: ConfigMeta represents a collection of metadata information for add-on configuration. + type: object + required: + - resource + properties: + defaultConfig: + description: defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration. + type: object + required: + - name + properties: + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + group: + description: group of the add-on configuration. + type: string + default: "" + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - group + - resource + x-kubernetes-list-type: map + status: + description: status represents the current status of cluster management add-on. 
+ type: object + allOf: + - id: "abc" + items: + - schema: + description: test + oneOf: + - id: "abc" + patternProperties: + abc: + description: test + dependencies: + abc: + property: + - "abc" + definitions: + abc: + description: test + anyOf: + - id: "abc" + additionalProperties: + schema: + description: test + additionalItems: + schema: + description: test + not: + description: test + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/_helpers.tpl b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/_helpers.tpl new file mode 100644 index 000000000..5e8e62ea4 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "test.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "test.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
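+For example (assumed values): the chart name "test" with version "2.2.0+build.1"
+renders as "test-2.2.0_build.1", since "+" is replaced with "_" and the result
+is truncated to 63 characters.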
+*/}} +{{- define "test.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim.yaml new file mode 100644 index 000000000..a404675da --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim.yaml @@ -0,0 +1,14 @@ +{{- if not (eq .Values.clusterName "local-cluster") }} +apiVersion: cluster.open-cluster-management.io/v1alpha1 +kind: ClusterClaim +metadata: + annotations: + hubKubeConfigSecret: {{ .Values.hubKubeConfigSecret }} + managedKubeConfigSecret: {{ .Values.managedKubeConfigSecret }} + labels: + open-cluster-management.io/hub-managed: "" + open-cluster-management.io/spoke-only: "" + name: clusterName +spec: + value: {{ .Values.clusterName }} +{{- end }} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml new file mode 100644 index 000000000..465ceea67 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml @@ -0,0 +1,249 @@ +{{- if not (eq .Values.clusterName "local-cluster") }} +{{- if semverCompare "< 1.16.0" .Capabilities.KubeVersion.Version }} +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterclaims.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ClusterClaim + listKind: ClusterClaimList + plural: clusterclaims + singular: clusterclaim + scope: Cluster + preserveUnknownFields: false + versions: + - additionalPrinterColumns: + - jsonPath: .spec.addOnMeta.displayName + name: DISPLAY NAME + type: string + - jsonPath: .spec.addOnConfiguration.crdName + name: CRD NAME + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterManagementAddOn represents the registration of an add-on to the cluster manager. This resource allows the user to discover which add-on is available for the cluster manager and also provides metadata information about the add-on. This resource also provides a linkage to ManagedClusterAddOn, the name of the ClusterManagementAddOn resource will be used for the namespace-scoped ManagedClusterAddOn resource. ClusterManagementAddOn is a cluster-scoped resource. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec represents a desired configuration for the agent on the cluster management add-on. 
+ type: object + properties: + addOnConfiguration: + description: 'Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.' + type: object + properties: + crName: + description: crName is the name of the CR used to configure instances of the managed add-on. This field should be configured if add-on CR have a consistent name across the all of the ManagedCluster instaces. + type: string + crdName: + description: crdName is the name of the CRD used to configure instances of the managed add-on. This field should be configured if the add-on have a CRD that controls the configuration of the add-on. + type: string + lastObservedGeneration: + description: lastObservedGeneration is the observed generation of the custom resource for the configuration of the addon. + type: integer + format: int64 + addOnMeta: + description: addOnMeta is a reference to the metadata information for the add-on. + type: object + properties: + description: + description: description represents the detailed description of the add-on. + type: string + displayName: + description: displayName represents the name of add-on that will be displayed. + type: string + installStrategy: + description: InstallStrategy represents that related ManagedClusterAddOns should be installed on certain clusters. + type: object + default: + type: Manual + properties: + placements: + description: Placements is a list of placement references honored when install strategy type is Placements. All clusters selected by these placements will install the addon If one cluster belongs to multiple placements, it will only apply the strategy defined later in the order. That is to say, The latter strategy overrides the previous one. + type: array + items: + type: object + required: + - name + - namespace + properties: + configs: + description: Configs is the configuration of managedClusterAddon during installation. User can override the configuration by updating the managedClusterAddon directly. + type: array + items: + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + default: "" + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + name: + description: Name is the name of the placement + type: string + minLength: 1 + namespace: + description: Namespace is the namespace of the placement + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + type: + description: 'Type is the type of the install strategy, it can be: - Manual: no automatic install - Placements: install to clusters selected by placements.' + type: string + default: Manual + enum: + - Manual + - Placements + supportedConfigs: + description: supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. The default is an empty list + type: array + items: + description: ConfigMeta represents a collection of metadata information for add-on configuration. 
+ type: object + required: + - resource + properties: + defaultConfig: + description: defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration. + type: object + required: + - name + properties: + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + group: + description: group of the add-on configuration. + type: string + default: "" + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - group + - resource + x-kubernetes-list-type: map + status: + description: status represents the current status of cluster management add-on. + type: object + allOf: + - id: "abc" + items: + - schema: + description: test + oneOf: + - id: "abc" + patternProperties: + abc: + description: test + dependencies: + abc: + property: + - "abc" + definitions: + abc: + description: test + anyOf: + - id: "abc" + additionalProperties: + schema: + description: test + additionalItems: + schema: + description: test + not: + description: test + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +{{ else }} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterclaims.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ClusterClaim + listKind: ClusterClaimList + plural: clusterclaims + singular: clusterclaim + scope: Cluster + preserveUnknownFields: false + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterClaim represents cluster information that a managed cluster claims ClusterClaims with well known names include, 1. id.k8s.io, it contains a unique identifier for the cluster. 2. clusterset.k8s.io, it contains an identifier that relates the cluster to the ClusterSet in which it belongs. ClusterClaims created on a managed cluster will be collected and saved into the status of the corresponding ManagedCluster on hub. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of the ClusterClaim. 
+ type: object + properties: + value: + description: Value is a claim-dependent string + type: string + maxLength: 1024 + minLength: 1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end }} +{{- end }} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/deployment.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/deployment.yaml new file mode 100644 index 000000000..4604895fa --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/deployment.yaml @@ -0,0 +1,103 @@ +{{- if .Values.enabled }} +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ template "test.fullname" . }}-addon + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "test.name" . }}-addon + chart: {{ template "test.chart" . }} + component: "addon" + release: {{ .Release.Name }} + controller: "operator" + hubKubeConfigSecret: {{ .Values.hubKubeConfigSecret }} +spec: + {{- if eq .Values.clusterName "local-cluster" }} + replicas: 3 + {{- else }} + replicas: 1 + {{- end }} + selector: + matchLabels: + app: {{ template "test.name" . }}-addon + component: "addon" + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "test.name" . }}-addon + component: "addon" + controller: "operator" + release: {{ .Release.Name }} + chart: {{ template "test.chart" . }} + spec: + serviceAccountName: {{ template "test.fullname" . }} + containers: + - name: test + image: "{{ .Values.global.imageOverrides.testImage }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + resources: + {{- toYaml .Values.resources | nindent 10 }} + command: ["test"] + env: + - name: WATCH_NAMESPACE + value: "{{ .Values.clusterName }}" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "test" + {{- if .Values.global.proxyConfig.HTTP_PROXY }} + - name: HTTP_PROXY + value: {{ .Values.global.proxyConfig.HTTP_PROXY }} + {{- end }} + {{- if .Values.global.proxyConfig.HTTPS_PROXY }} + - name: HTTPS_PROXY + value: {{ .Values.global.proxyConfig.HTTPS_PROXY }} + {{- end }} + {{- if .Values.global.proxyConfig.NO_PROXY }} + - name: NO_PROXY + value: {{ .Values.global.proxyConfig.NO_PROXY }} + {{- end }} + args: + - '--enable-lease=true' + {{- if semverCompare "< 1.14.0" .Capabilities.KubeVersion.Version }} + - --legacy-leader-elect=true + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + failureThreshold: 3 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + failureThreshold: 3 + periodSeconds: 10 + volumeMounts: + - name: klusterlet-config + mountPath: /var/run/klusterlet + volumes: + - name: klusterlet-config + secret: + secretName: {{ .Values.hubKubeConfigSecret }} + {{- if .Values.imagePullSecret }} + imagePullSecrets: + - name: "{{ .Values.imagePullSecret }}" + {{- end }} + {{- with .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} +{{- end }} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/multi_resources.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/multi_resources.yaml new file mode 100644 index 000000000..7b93d1f4b --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/multi_resources.yaml @@ -0,0 +1,21 @@ +{{- if (eq .Values.clusterName "cluster2") }} +apiVersion: cluster.open-cluster-management.io/v1alpha1 +kind: ClusterClaim +metadata: + labels: + open-cluster-management.io/hub-managed: "" + open-cluster-management.io/spoke-only: "" + name: cc1 +spec: + value: {{ .Values.clusterName }} +--- +apiVersion: cluster.open-cluster-management.io/v1alpha1 +kind: ClusterClaim +metadata: + labels: + open-cluster-management.io/hub-managed: "" + open-cluster-management.io/spoke-only: "" + name: cc2 +spec: + value: {{ .Values.clusterName }} +{{- end }} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/namespace_hosted_mode.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/namespace_hosted_mode.yaml new file mode 100644 index 000000000..00be8aa0f --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/namespace_hosted_mode.yaml @@ -0,0 +1,8 @@ +{{- if .Values.hostingClusterCapabilities.KubeVersion.Version }} +{{- if semverCompare "> 1.16.0" .Values.hostingClusterCapabilities.KubeVersion.Version }} +apiVersion: v1 +kind: Namespace +metadata: + name: "newer-k8s" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/values.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/values.yaml new file mode 100644 index 000000000..2c9d28c55 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/values.yaml @@ -0,0 +1,42 @@ +org: open-cluster-management + +enabled: true +hubKubeConfigSecret: null +fullnameOverride: "test-addon" +nameOverride: "test-addon" + +clusterName: null +clusterNamespace: null + +resources: + requests: + memory: 128Mi + limits: + memory: 256Mi + +affinity: {} + +tolerations: +- key: "dedicated" + operator: "Equal" + value: "infra" + effect: "NoSchedule" +- key: node-role.kubernetes.io/infra + operator: Exists + effect: NoSchedule + +postDeleteJobServiceAccount: null + +imagePullPolicy: IfNotPresent +imagePullSecret: null + +global: + imagePullPolicy: IfNotPresent + imagePullSecret: null + imageOverrides: + testImage: quay.io/testimage:test + nodeSelector: {} + proxyConfig: + HTTP_PROXY: null + HTTPS_PROXY: null + NO_PROXY: null diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/template/clusterclaim.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/template/clusterclaim.yaml new file mode 100644 index 000000000..4e8070cfd --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/template/clusterclaim.yaml @@ -0,0 +1,15 @@ +{{ if not (eq .ClusterName "local-cluster") }} +--- +apiVersion: cluster.open-cluster-management.io/v1alpha1 +kind: ClusterClaim +metadata: + annotations: + hubKubeConfigSecret: {{ .HubKubeConfigSecret }} + 
managedKubeConfigSecret: {{ .ManagedKubeConfigSecret }} + labels: + open-cluster-management.io/hub-managed: "" + open-cluster-management.io/spoke-only: "" + name: {{ .AddonInstallNamespace }} +spec: + value: {{ .Image }} +{{ end }} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/template/deployment.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/template/deployment.yaml new file mode 100644 index 000000000..4d66c1b35 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/template/deployment.yaml @@ -0,0 +1,41 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helloworld-agent + namespace: {{ .AddonInstallNamespace }} + labels: + app: helloworld-agent + clusterName: {{ .ClusterName }} +spec: + replicas: 1 + selector: + matchLabels: + app: helloworld-agent + template: + metadata: + labels: + app: helloworld-agent + spec: + serviceAccountName: helloworld-agent-sa +{{- if .NodeSelector }} + nodeSelector: + {{- range $key, $value := .NodeSelector }} + "{{ $key }}": "{{ $value }}" + {{- end }} +{{- end }} + volumes: + - name: hub-config + secret: + secretName: {{ .KubeConfigSecret }} + containers: + - name: helloworld-agent + image: {{ .Image }} + imagePullPolicy: IfNotPresent + args: + - "/helloworld" + - "agent" + - "--hub-kubeconfig=/var/run/hub/kubeconfig" + - "--cluster-name={{ .ClusterName }}" + volumeMounts: + - name: hub-config + mountPath: /var/run/hub diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go new file mode 100644 index 000000000..20ff065c5 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go @@ -0,0 +1,222 @@ +package addonfactory + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +func trimCRDDescription(objects []runtime.Object) []runtime.Object { + rstObjects := []runtime.Object{} + for _, o := range objects { + switch object := o.(type) { + case *apiextensionsv1.CustomResourceDefinition: + trimCRDv1Description(object) + rstObjects = append(rstObjects, object) + case *apiextensionsv1beta1.CustomResourceDefinition: + trimCRDv1beta1Description(object) + rstObjects = append(rstObjects, object) + default: + rstObjects = append(rstObjects, object) + } + } + + return rstObjects +} + +// trimCRDv1Description is to remove the description info in the versions of CRD spec +func trimCRDv1Description(crd *apiextensionsv1.CustomResourceDefinition) { + versions := crd.Spec.Versions + for i := range versions { + if versions[i].Schema != nil { + removeDescriptionV1(versions[i].Schema.OpenAPIV3Schema) + } + } +} + +func removeDescriptionV1(p *apiextensionsv1.JSONSchemaProps) { + if p == nil { + return + } + + p.Description = "" + + if p.Items != nil { + removeDescriptionV1(p.Items.Schema) + for i := range p.Items.JSONSchemas { + removeDescriptionV1(&p.Items.JSONSchemas[i]) + } + } + + if len(p.AllOf) != 0 { + for i := range p.AllOf { + removeDescriptionV1(&p.AllOf[i]) + } + } + + if len(p.OneOf) != 0 { + for i := range p.OneOf { + removeDescriptionV1(&p.OneOf[i]) + } + } + + if len(p.AnyOf) != 0 { + for i := range p.AnyOf { + removeDescriptionV1(&p.AnyOf[i]) + } + } + + if p.Not != nil { + removeDescriptionV1(p.Not) + } + + 
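+	// Properties, patternProperties, definitions, and dependencies are maps
+	// whose values are not addressable in place, so each entry is copied,
+	// stripped recursively, and written back into a fresh map.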
if len(p.Properties) != 0 { + newProperties := map[string]apiextensionsv1.JSONSchemaProps{} + for k := range p.Properties { + v := p.Properties[k] + removeDescriptionV1(&v) + newProperties[k] = v + } + p.Properties = newProperties + } + + if len(p.PatternProperties) != 0 { + newProperties := map[string]apiextensionsv1.JSONSchemaProps{} + for k := range p.PatternProperties { + v := p.PatternProperties[k] + removeDescriptionV1(&v) + newProperties[k] = v + } + p.PatternProperties = newProperties + } + + if p.AdditionalProperties != nil { + removeDescriptionV1(p.AdditionalProperties.Schema) + } + + if len(p.Dependencies) != 0 { + newDependencies := map[string]apiextensionsv1.JSONSchemaPropsOrStringArray{} + for k := range p.Dependencies { + v := p.Dependencies[k] + removeDescriptionV1(v.Schema) + newDependencies[k] = v + } + p.Dependencies = newDependencies + } + + if p.AdditionalItems != nil { + removeDescriptionV1(p.AdditionalItems.Schema) + } + + if len(p.Definitions) != 0 { + newDefinitions := map[string]apiextensionsv1.JSONSchemaProps{} + for k := range p.Definitions { + v := p.Definitions[k] + removeDescriptionV1(&v) + newDefinitions[k] = v + } + p.Definitions = newDefinitions + } + + if p.ExternalDocs != nil { + p.ExternalDocs.Description = "" + } +} + +// trimCRDv1beta1Description is to remove the description info in the versions of CRD spec +func trimCRDv1beta1Description(crd *apiextensionsv1beta1.CustomResourceDefinition) { + versions := crd.Spec.Versions + for i := range versions { + if versions[i].Schema != nil { + removeDescriptionV1beta1(versions[i].Schema.OpenAPIV3Schema) + } + } +} + +func removeDescriptionV1beta1(p *apiextensionsv1beta1.JSONSchemaProps) { + if p == nil { + return + } + + p.Description = "" + + if p.Items != nil { + removeDescriptionV1beta1(p.Items.Schema) + for i := range p.Items.JSONSchemas { + removeDescriptionV1beta1(&p.Items.JSONSchemas[i]) + } + } + + if len(p.AllOf) != 0 { + for i := range p.AllOf { + removeDescriptionV1beta1(&p.AllOf[i]) + } + } + + if len(p.OneOf) != 0 { + for i := range p.OneOf { + removeDescriptionV1beta1(&p.OneOf[i]) + } + } + + if len(p.AnyOf) != 0 { + for i := range p.AnyOf { + removeDescriptionV1beta1(&p.AnyOf[i]) + } + } + + if p.Not != nil { + removeDescriptionV1beta1(p.Not) + } + + if len(p.Properties) != 0 { + newProperties := map[string]apiextensionsv1beta1.JSONSchemaProps{} + for k := range p.Properties { + v := p.Properties[k] + removeDescriptionV1beta1(&v) + newProperties[k] = v + } + p.Properties = newProperties + } + + if len(p.PatternProperties) != 0 { + newProperties := map[string]apiextensionsv1beta1.JSONSchemaProps{} + for k := range p.PatternProperties { + v := p.PatternProperties[k] + removeDescriptionV1beta1(&v) + newProperties[k] = v + } + p.PatternProperties = newProperties + } + + if p.AdditionalProperties != nil { + removeDescriptionV1beta1(p.AdditionalProperties.Schema) + } + + if len(p.Dependencies) != 0 { + newDependencies := map[string]apiextensionsv1beta1.JSONSchemaPropsOrStringArray{} + for k, v := range p.Dependencies { + removeDescriptionV1beta1(v.Schema) + newDependencies[k] = v + } + p.Dependencies = newDependencies + } + + if p.AdditionalItems != nil { + removeDescriptionV1beta1(p.AdditionalItems.Schema) + } + + if len(p.Definitions) != 0 { + newDefinitions := map[string]apiextensionsv1beta1.JSONSchemaProps{} + for k := range p.Definitions { + v := p.Definitions[k] + removeDescriptionV1beta1(&v) + newDefinitions[k] = v + } + p.Definitions = newDefinitions + } + + if p.ExternalDocs != nil { + 
p.ExternalDocs.Description = "" + } +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting/helpers.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting/helpers.go new file mode 100644 index 000000000..c0a6c43de --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting/helpers.go @@ -0,0 +1,350 @@ +package addontesting + +import ( + "context" + "fmt" + "testing" + "time" + + certv1 "k8s.io/api/certificates/v1" + certv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + clienttesting "k8s.io/client-go/testing" + "k8s.io/client-go/util/workqueue" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/events" +) + +type FakeSyncContext struct { + queue workqueue.RateLimitingInterface + recorder events.Recorder +} + +func NewFakeSyncContext(t *testing.T) *FakeSyncContext { + return &FakeSyncContext{ + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + recorder: NewTestingEventRecorder(t), + } +} + +func (f FakeSyncContext) Queue() workqueue.RateLimitingInterface { return f.queue } +func (f FakeSyncContext) Recorder() events.Recorder { return f.recorder } + +func NewUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + }, + } +} + +func NewHostingUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured { + u := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + }, + } + u.SetAnnotations(map[string]string{ + addonapiv1alpha1.HostedManifestLocationAnnotationKey: addonapiv1alpha1.HostedManifestLocationHostingValue, + }) + return u +} + +func NewHookJob(name, namespace string) *unstructured.Unstructured { + job := NewUnstructured("batch/v1", "Job", namespace, name) + job.SetAnnotations(map[string]string{addonapiv1alpha1.AddonPreDeleteHookAnnotationKey: ""}) + return job +} + +func NewHostedHookJob(name, namespace string) *unstructured.Unstructured { + job := NewUnstructured("batch/v1", "Job", namespace, name) + job.SetAnnotations(map[string]string{addonapiv1alpha1.AddonPreDeleteHookAnnotationKey: "", + addonapiv1alpha1.HostedManifestLocationAnnotationKey: addonapiv1alpha1.HostedManifestLocationHostingValue}) + return job +} + +func NewAddon(name, namespace string, owners ...metav1.OwnerReference) *addonapiv1alpha1.ManagedClusterAddOn { + return &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + OwnerReferences: owners, + }, + } +} +func NewAddonWithConditions(name, namespace string, conditions ...metav1.Condition) *addonapiv1alpha1.ManagedClusterAddOn { + return &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + Conditions: conditions, + }, + } +} + +func NewHostedModeAddon(name, 
namespace string, hostingCluster string, + conditions ...metav1.Condition) *addonapiv1alpha1.ManagedClusterAddOn { + return &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: map[string]string{addonapiv1alpha1.HostingClusterNameAnnotationKey: hostingCluster}, + }, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + Conditions: conditions, + }, + } +} + +func NewHostedModeAddonWithFinalizer(name, namespace string, hostingCluster string, + conditions ...metav1.Condition) *addonapiv1alpha1.ManagedClusterAddOn { + addon := NewHostedModeAddon(name, namespace, hostingCluster) + addon.SetFinalizers([]string{addonapiv1alpha1.AddonHostingManifestFinalizer}) + addon.Status.Conditions = conditions + return addon +} + +func SetAddonDeletionTimestamp(addon *addonapiv1alpha1.ManagedClusterAddOn, + deletionTimestamp time.Time) *addonapiv1alpha1.ManagedClusterAddOn { + addon.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return addon +} + +func SetAddonFinalizers(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizers ...string) *addonapiv1alpha1.ManagedClusterAddOn { + addon.SetFinalizers(finalizers) + return addon +} + +type clusterManagementAddonBuilder struct { + clusterManagementAddOn *addonapiv1alpha1.ClusterManagementAddOn +} + +func NewClusterManagementAddon(name, crd, cr string) *clusterManagementAddonBuilder { + return &clusterManagementAddonBuilder{ + &addonapiv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{ + AddOnConfiguration: addonapiv1alpha1.ConfigCoordinates{ + CRDName: crd, + CRName: cr, + }, + InstallStrategy: addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyManual, + }, + }, + }, + } +} + +func (b *clusterManagementAddonBuilder) WithSupportedConfigs(supportedConfigs ...addonapiv1alpha1.ConfigMeta) *clusterManagementAddonBuilder { + b.clusterManagementAddOn.Spec.SupportedConfigs = supportedConfigs + return b +} + +func (b *clusterManagementAddonBuilder) WithPlacementStrategy(placements ...addonapiv1alpha1.PlacementStrategy) *clusterManagementAddonBuilder { + b.clusterManagementAddOn.Spec.InstallStrategy.Type = addonapiv1alpha1.AddonInstallStrategyPlacements + b.clusterManagementAddOn.Spec.InstallStrategy.Placements = placements + return b +} + +func (b *clusterManagementAddonBuilder) WithDefaultConfigReferences( + defaultConfigReferences ...addonapiv1alpha1.DefaultConfigReference, +) *clusterManagementAddonBuilder { + b.clusterManagementAddOn.Status.DefaultConfigReferences = defaultConfigReferences + return b +} + +func (b *clusterManagementAddonBuilder) WithInstallProgression(installProgressions ...addonapiv1alpha1.InstallProgression) *clusterManagementAddonBuilder { + b.clusterManagementAddOn.Status.InstallProgressions = installProgressions + return b +} + +func (b *clusterManagementAddonBuilder) Build() *addonapiv1alpha1.ClusterManagementAddOn { + return b.clusterManagementAddOn +} + +func NewManifestWork(name, namespace string, objects ...*unstructured.Unstructured) *workapiv1.ManifestWork { + work := &workapiv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: workapiv1.ManifestWorkSpec{ + Workload: workapiv1.ManifestsTemplate{ + Manifests: []workapiv1.Manifest{}, + }, + }, + } + + for _, object := range objects { + objectStr, _ := object.MarshalJSON() + manifest := workapiv1.Manifest{} + manifest.Raw = objectStr + 
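+		// the MarshalJSON error is deliberately discarded: this is a test-only
+		// helper and the inputs are always well-formed unstructured objects.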
work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, manifest)
+	}
+
+	return work
+}
+
+func NewManagedCluster(name string) *clusterv1.ManagedCluster {
+	return &clusterv1.ManagedCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	}
+}
+
+func DeleteManagedCluster(c *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
+	c.DeletionTimestamp = &metav1.Time{
+		Time: time.Now(),
+	}
+	return c
+}
+
+func SetManagedClusterAnnotation(c *clusterv1.ManagedCluster,
+	annotations map[string]string) *clusterv1.ManagedCluster {
+	c.Annotations = annotations
+	return c
+}
+
+func NewCSR(addon, cluster string) *certv1.CertificateSigningRequest {
+	return &certv1.CertificateSigningRequest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: fmt.Sprintf("addon-%s", addon),
+			Labels: map[string]string{
+				"open-cluster-management.io/cluster-name": cluster,
+				"open-cluster-management.io/addon-name":   addon,
+			},
+		},
+		Spec: certv1.CertificateSigningRequestSpec{},
+	}
+}
+
+func NewDeniedCSR(addon, cluster string) *certv1.CertificateSigningRequest {
+	csr := NewCSR(addon, cluster)
+	csr.Status.Conditions = append(csr.Status.Conditions, certv1.CertificateSigningRequestCondition{
+		Type:   certv1.CertificateDenied,
+		Status: corev1.ConditionTrue,
+	})
+	return csr
+}
+
+func NewApprovedCSR(addon, cluster string) *certv1.CertificateSigningRequest {
+	csr := NewCSR(addon, cluster)
+	csr.Status.Conditions = append(csr.Status.Conditions, certv1.CertificateSigningRequestCondition{
+		Type:   certv1.CertificateApproved,
+		Status: corev1.ConditionTrue,
+	})
+	return csr
+}
+
+func NewV1beta1CSR(addon, cluster string) *certv1beta1.CertificateSigningRequest {
+	return &certv1beta1.CertificateSigningRequest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: fmt.Sprintf("addon-%s", addon),
+			Labels: map[string]string{
+				"open-cluster-management.io/cluster-name": cluster,
+				"open-cluster-management.io/addon-name":   addon,
+			},
+		},
+		Spec: certv1beta1.CertificateSigningRequestSpec{},
+	}
+}
+
+func NewDeniedV1beta1CSR(addon, cluster string) *certv1beta1.CertificateSigningRequest {
+	csr := NewV1beta1CSR(addon, cluster)
+	csr.Status.Conditions = append(csr.Status.Conditions, certv1beta1.CertificateSigningRequestCondition{
+		Type:   certv1beta1.CertificateDenied,
+		Status: corev1.ConditionTrue,
+	})
+	return csr
+}
+
+func NewApprovedV1beta1CSR(addon, cluster string) *certv1beta1.CertificateSigningRequest {
+	csr := NewV1beta1CSR(addon, cluster)
+	csr.Status.Conditions = append(csr.Status.Conditions, certv1beta1.CertificateSigningRequestCondition{
+		Type:   certv1beta1.CertificateApproved,
+		Status: corev1.ConditionTrue,
+	})
+	return csr
+}
+
+// AssertActions asserts that the actual actions have the expected action verbs
+func AssertActions(t *testing.T, actualActions []clienttesting.Action, expectedVerbs ...string) {
+	if len(actualActions) != len(expectedVerbs) {
+		t.Fatalf("expected %d calls but got: %#v", len(expectedVerbs), actualActions)
+	}
+	for i, expected := range expectedVerbs {
+		if actualActions[i].GetVerb() != expected {
+			t.Errorf("expected %s action but got: %#v", expected, actualActions[i])
+		}
+	}
+}
+
+// AssertNoActions asserts that no actions happened
+func AssertNoActions(t *testing.T, actualActions []clienttesting.Action) {
+	AssertActions(t, actualActions)
+}
+
+type TestingEventRecorder struct {
+	t         *testing.T
+	component string
+}
+
+func (r *TestingEventRecorder) WithContext(ctx context.Context) events.Recorder {
+	return r
+}
+
+// NewTestingEventRecorder provides an event recorder that will log all
recorded events to the test log.
+func NewTestingEventRecorder(t *testing.T) events.Recorder {
+	return &TestingEventRecorder{t: t, component: "test"}
+}
+
+func (r *TestingEventRecorder) ComponentName() string {
+	return r.component
+}
+
+func (r *TestingEventRecorder) ForComponent(c string) events.Recorder {
+	return &TestingEventRecorder{t: r.t, component: c}
+}
+
+func (r *TestingEventRecorder) Shutdown() {}
+
+func (r *TestingEventRecorder) WithComponentSuffix(suffix string) events.Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *TestingEventRecorder) Event(reason, message string) {
+	r.t.Logf("Event: %v: %v", reason, message)
+}
+
+func (r *TestingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *TestingEventRecorder) Warning(reason, message string) {
+	r.t.Logf("Warning: %v: %v", reason, message)
+}
+
+func (r *TestingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go
new file mode 100644
index 000000000..56e6f0169
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go
@@ -0,0 +1,66 @@
+package constants
+
+import (
+	"fmt"
+
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+)
+
+const (
+	// InstallModeBuiltinValueKey is the key of the built-in value that represents the addon install mode; addon developers
+	// can use this built-in value in manifests.
+	InstallModeBuiltinValueKey = "InstallMode"
+	InstallModeHosted          = "Hosted"
+	InstallModeDefault         = "Default"
+)
+
+// DeployWorkNamePrefix returns the prefix of the work name for the addon
+func DeployWorkNamePrefix(addonName string) string {
+	return fmt.Sprintf("addon-%s-deploy", addonName)
+}
+
+// DeployHostingWorkNamePrefix returns the prefix of the work name on the hosting cluster for the addon
+func DeployHostingWorkNamePrefix(addonNamespace, addonName string) string {
+	return fmt.Sprintf("%s-hosting-%s", DeployWorkNamePrefix(addonName), addonNamespace)
+}
+
+// PreDeleteHookWorkName returns the name of the pre-delete work for the addon
+func PreDeleteHookWorkName(addonName string) string {
+	return fmt.Sprintf("addon-%s-pre-delete", addonName)
+}
+
+// PreDeleteHookHostingWorkName returns the name of the pre-delete work on the hosting cluster for the addon
+func PreDeleteHookHostingWorkName(addonNamespace, addonName string) string {
+	return fmt.Sprintf("%s-hosting-%s", PreDeleteHookWorkName(addonName), addonNamespace)
+}
+
+// GetHostedModeInfo returns the addon installation mode and the hosting cluster name.
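+//
+// A typical caller branches on the returned mode. Illustrative sketch only
+// (the variable names are not part of this package):
+//
+//	mode, hostingCluster := constants.GetHostedModeInfo(addon.GetAnnotations())
+//	if mode == constants.InstallModeHosted {
+//		// deploy the agent manifests to hostingCluster rather than the managed cluster
+//	}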
+func GetHostedModeInfo(annotations map[string]string) (string, string) {
+	hostingClusterName, ok := annotations[addonv1alpha1.HostingClusterNameAnnotationKey]
+	if !ok {
+		return InstallModeDefault, ""
+	}
+
+	return InstallModeHosted, hostingClusterName
+}
+
+// GetHostedManifestLocation returns the location of the manifest in Hosted mode; it returns an error if the location is invalid
+func GetHostedManifestLocation(labels, annotations map[string]string) (string, bool, error) {
+	manifestLocation := annotations[addonv1alpha1.HostedManifestLocationAnnotationKey]
+
+	// TODO: deprecate HostedManifestLocationLabelKey in a future release
+	if manifestLocation == "" {
+		manifestLocation = labels[addonv1alpha1.HostedManifestLocationLabelKey]
+	}
+
+	switch manifestLocation {
+	case addonv1alpha1.HostedManifestLocationManagedValue,
+		addonv1alpha1.HostedManifestLocationHostingValue,
+		addonv1alpha1.HostedManifestLocationNoneValue:
+		return manifestLocation, true, nil
+	case "":
+		return "", false, nil
+	default:
+		return "", true, fmt.Errorf("not supported manifest location: %s", manifestLocation)
+	}
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go
new file mode 100644
index 000000000..0c108435c
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go
@@ -0,0 +1,276 @@
+package addonconfig
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/dynamic/dynamicinformer"
+	"k8s.io/client-go/dynamic/dynamiclister"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+
+	"open-cluster-management.io/addon-framework/pkg/basecontroller/factory"
+	"open-cluster-management.io/addon-framework/pkg/index"
+	"open-cluster-management.io/addon-framework/pkg/utils"
+)
+
+const (
+	controllerName = "addon-config-controller"
+)
+
+type enqueueFunc func(obj interface{})
+
+// addonConfigController reconciles all addon config types of interest (GroupVersionResource) on the hub.
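+//
+// A manager registers the config GVRs it watches when constructing this
+// controller. Illustrative sketch only; AddOnDeploymentConfig is the common
+// case, but any registered CRD works:
+//
+//	configGVRs := map[schema.GroupVersionResource]bool{
+//		{Group: "addon.open-cluster-management.io", Version: "v1alpha1", Resource: "addondeploymentconfigs"}: true,
+//	}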
+type addonConfigController struct { + addonClient addonv1alpha1client.Interface + addonLister addonlisterv1alpha1.ManagedClusterAddOnLister + addonIndexer cache.Indexer + configListers map[schema.GroupResource]dynamiclister.Lister + queue workqueue.RateLimitingInterface + addonFilterFunc factory.EventFilterFunc + configGVRs map[schema.GroupVersionResource]bool + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister +} + +func NewAddonConfigController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + configInformerFactory dynamicinformer.DynamicSharedInformerFactory, + configGVRs map[schema.GroupVersionResource]bool, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + syncCtx := factory.NewSyncContext(controllerName) + + c := &addonConfigController{ + addonClient: addonClient, + addonLister: addonInformers.Lister(), + addonIndexer: addonInformers.Informer().GetIndexer(), + configListers: map[schema.GroupResource]dynamiclister.Lister{}, + queue: syncCtx.Queue(), + addonFilterFunc: addonFilterFunc, + configGVRs: configGVRs, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + } + + configInformers := c.buildConfigInformers(configInformerFactory, configGVRs) + + return factory.New(). + WithSyncContext(syncCtx). + WithInformersQueueKeysFunc(func(obj runtime.Object) []string { + key, _ := cache.MetaNamespaceKeyFunc(obj) + return []string{key} + }, addonInformers.Informer()). + WithBareInformers(configInformers...). + WithSync(c.sync).ToController(controllerName) +} + +func (c *addonConfigController) buildConfigInformers( + configInformerFactory dynamicinformer.DynamicSharedInformerFactory, + configGVRs map[schema.GroupVersionResource]bool, +) []factory.Informer { + configInformers := []factory.Informer{} + for gvrRaw := range configGVRs { + gvr := gvrRaw // copy the value since it will be used in the closure + genericInformer := configInformerFactory.ForResource(gvr) + indexInformer := genericInformer.Informer() + _, err := indexInformer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueAddOnsByConfig(gvr), + UpdateFunc: func(oldObj, newObj interface{}) { + c.enqueueAddOnsByConfig(gvr)(newObj) + }, + DeleteFunc: c.enqueueAddOnsByConfig(gvr), + }, + ) + if err != nil { + utilruntime.HandleError(err) + } + configInformers = append(configInformers, indexInformer) + c.configListers[schema.GroupResource{Group: gvr.Group, Resource: gvr.Resource}] = dynamiclister.New(indexInformer.GetIndexer(), gvr) + } + return configInformers +} + +func (c *addonConfigController) enqueueAddOnsByConfig(gvr schema.GroupVersionResource) enqueueFunc { + return func(obj interface{}) { + namespaceName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error to get accessor of object: %v", obj)) + return + } + + objs, err := c.addonIndexer.ByIndex(index.AddonByConfig, + fmt.Sprintf("%s/%s/%s", gvr.Group, gvr.Resource, namespaceName)) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error to get addons: %v", err)) + return + } + + for _, obj := range objs { + if obj == nil { + continue + } + key, _ := cache.MetaNamespaceKeyFunc(obj) + c.queue.Add(key) + } + } +} + +func (c *addonConfigController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + addonNamespace, addonName, err := 
cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + addon, err := c.addonLister.ManagedClusterAddOns(addonNamespace).Get(addonName) + if errors.IsNotFound(err) { + // addon could be deleted, ignore + return nil + } + if err != nil { + return err + } + + cma, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + // cluster management addon could be deleted, ignore + return nil + } + if err != nil { + return err + } + + if !c.addonFilterFunc(cma) { + return nil + } + + addonCopy := addon.DeepCopy() + if err := c.updateConfigSpecHashAndGenerations(addonCopy); err != nil { + return err + } + + return c.patchConfigReferences(ctx, addon, addonCopy) +} + +func (c *addonConfigController) updateConfigSpecHashAndGenerations(addon *addonapiv1alpha1.ManagedClusterAddOn) error { + supportedConfigSet := map[addonapiv1alpha1.ConfigGroupResource]bool{} + for _, config := range addon.Status.SupportedConfigs { + supportedConfigSet[config] = true + } + for index, configReference := range addon.Status.ConfigReferences { + + if !utils.ContainGR( + c.configGVRs, + configReference.ConfigGroupResource.Group, + configReference.ConfigGroupResource.Resource) { + continue + } + + lister, ok := c.configListers[schema.GroupResource{Group: configReference.ConfigGroupResource.Group, Resource: configReference.ConfigGroupResource.Resource}] + if !ok { + continue + } + + var config *unstructured.Unstructured + var err error + if configReference.Namespace == "" { + config, err = lister.Get(configReference.Name) + } else { + config, err = lister.Namespace(configReference.Namespace).Get(configReference.Name) + } + + if errors.IsNotFound(err) { + continue + } + + if err != nil { + return err + } + + // update LastObservedGeneration for all the configs in status + addon.Status.ConfigReferences[index].LastObservedGeneration = config.GetGeneration() + + // update desired spec hash only for the configs in spec + for _, addonconfig := range addon.Spec.Configs { + // do not update spec hash for unsupported configs + if _, ok := supportedConfigSet[addonconfig.ConfigGroupResource]; !ok { + continue + } + if configReference.DesiredConfig == nil { + continue + } + + if configReference.ConfigGroupResource == addonconfig.ConfigGroupResource && + configReference.DesiredConfig.ConfigReferent == addonconfig.ConfigReferent { + specHash, err := utils.GetSpecHash(config) + if err != nil { + return err + } + addon.Status.ConfigReferences[index].DesiredConfig.SpecHash = specHash + } + } + } + + return nil +} + +func (c *addonConfigController) patchConfigReferences(ctx context.Context, old, new *addonapiv1alpha1.ManagedClusterAddOn) error { + if equality.Semantic.DeepEqual(new.Status.ConfigReferences, old.Status.ConfigReferences) { + return nil + } + + oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + ConfigReferences: old.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + ConfigReferences: new.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + 
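+	// the generated merge patch carries the UID and resourceVersion, so the API
+	// server rejects it if the addon was recreated or modified in the meantime.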
klog.V(4).Infof("Patching addon %s/%s config reference with %s", new.Namespace, new.Name, string(patchBytes))
+	_, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(
+		ctx,
+		new.Name,
+		types.MergePatchType,
+		patchBytes,
+		metav1.PatchOptions{},
+		"status",
+	)
+	return err
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go
new file mode 100644
index 000000000..4c57759e7
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go
@@ -0,0 +1,138 @@
+package addoninstall
+
+import (
+	"context"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	errorsutil "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+	clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
+	clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/agent"
+	"open-cluster-management.io/addon-framework/pkg/basecontroller/factory"
+)
+
+// addonInstallController reconciles instances of ManagedCluster on the hub
+// to automatically install addons based on their install strategy.
+type addonInstallController struct {
+	addonClient               addonv1alpha1client.Interface
+	managedClusterLister      clusterlister.ManagedClusterLister
+	managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister
+	agentAddons               map[string]agent.AgentAddon
+}
+
+func NewAddonInstallController(
+	addonClient addonv1alpha1client.Interface,
+	clusterInformers clusterinformers.ManagedClusterInformer,
+	addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer,
+	agentAddons map[string]agent.AgentAddon,
+) factory.Controller {
+	c := &addonInstallController{
+		addonClient:               addonClient,
+		managedClusterLister:      clusterInformers.Lister(),
+		managedClusterAddonLister: addonInformers.Lister(),
+		agentAddons:               agentAddons,
+	}
+
+	return factory.New().WithFilteredEventsInformersQueueKeysFunc(
+		func(obj runtime.Object) []string {
+			accessor, _ := meta.Accessor(obj)
+			return []string{accessor.GetNamespace()}
+		},
+		func(obj interface{}) bool {
+			accessor, _ := meta.Accessor(obj)
+			if _, ok := c.agentAddons[accessor.GetName()]; !ok {
+				return false
+			}
+
+			return true
+		},
+		addonInformers.Informer()).
+		WithInformersQueueKeysFunc(
+			func(obj runtime.Object) []string {
+				accessor, _ := meta.Accessor(obj)
+				return []string{accessor.GetName()}
+			},
+			clusterInformers.Informer(),
+		).
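+		// both informers enqueue the managed cluster name: addon events map to
+		// their namespace (the cluster namespace), cluster events to the cluster name.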
+		WithSync(c.sync).ToController("addon-install-controller")
+}
+
+func (c *addonInstallController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error {
+	klog.V(4).Infof("Reconciling addon deploy on cluster %q", clusterName)
+
+	cluster, err := c.managedClusterLister.Get(clusterName)
+	if errors.IsNotFound(err) {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// if the cluster is deleting, do not install the addon
+	if !cluster.DeletionTimestamp.IsZero() {
+		klog.V(4).Infof("Cluster %q is deleting, skip addon deploy", clusterName)
+		return nil
+	}
+
+	if value, ok := cluster.Annotations[addonapiv1alpha1.DisableAddonAutomaticInstallationAnnotationKey]; ok &&
+		strings.EqualFold(value, "true") {
+
+		klog.V(4).Infof("Cluster %q has annotation %q, skip addon deploy",
+			clusterName, addonapiv1alpha1.DisableAddonAutomaticInstallationAnnotationKey)
+		return nil
+	}
+
+	var errs []error
+
+	for addonName, addon := range c.agentAddons {
+		if addon.GetAgentAddonOptions().InstallStrategy == nil {
+			continue
+		}
+
+		managedClusterFilter := addon.GetAgentAddonOptions().InstallStrategy.GetManagedClusterFilter()
+		if managedClusterFilter == nil {
+			continue
+		}
+		if !managedClusterFilter(cluster) {
+			klog.V(4).Infof("managed cluster filter does not match for addon %s on %s", addonName, clusterName)
+			continue
+		}
+
+		err = c.applyAddon(ctx, addonName, clusterName, addon.GetAgentAddonOptions().InstallStrategy.InstallNamespace)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return errorsutil.NewAggregate(errs)
+}
+
+func (c *addonInstallController) applyAddon(ctx context.Context, addonName, clusterName, installNamespace string) error {
+	_, err := c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName)
+
+	// only create the addon when it is missing; if the user updates the addon resource, it should not be reverted
+	if errors.IsNotFound(err) {
+		addon := &addonapiv1alpha1.ManagedClusterAddOn{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      addonName,
+				Namespace: clusterName,
+			},
+			Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{
+				InstallNamespace: installNamespace,
+			},
+		}
+		_, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName).Create(ctx, addon, metav1.CreateOptions{})
+		return err
+	}
+
+	return err
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go
new file mode 100644
index 000000000..113632498
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go
@@ -0,0 +1,412 @@
+package agentdeploy
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	errorsutil "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
+	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+	clusterinformers
"open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + "open-cluster-management.io/api/utils/work/v1/workapplier" + "open-cluster-management.io/api/utils/work/v1/workbuilder" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" +) + +// addonDeployController deploy addon agent resources on the managed cluster. +type addonDeployController struct { + workApplier *workapplier.WorkApplier + workBuilder *workbuilder.WorkBuilder + addonClient addonv1alpha1client.Interface + managedClusterLister clusterlister.ManagedClusterLister + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + workIndexer cache.Indexer + agentAddons map[string]agent.AgentAddon +} + +func NewAddonDeployController( + workClient workv1client.Interface, + addonClient addonv1alpha1client.Interface, + clusterInformers clusterinformers.ManagedClusterInformer, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + workInformers workinformers.ManifestWorkInformer, + agentAddons map[string]agent.AgentAddon, +) factory.Controller { + c := &addonDeployController{ + workApplier: workapplier.NewWorkApplierWithTypedClient(workClient, workInformers.Lister()), + // the default manifest limit in a work is 500k + // TODO: make the limit configurable + workBuilder: workbuilder.NewWorkBuilder().WithManifestsLimit(500 * 1024), + addonClient: addonClient, + managedClusterLister: clusterInformers.Lister(), + managedClusterAddonLister: addonInformers.Lister(), + workIndexer: workInformers.Informer().GetIndexer(), + agentAddons: agentAddons, + } + + return factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if _, ok := c.agentAddons[accessor.GetName()]; !ok { + return false + } + + return true + }, + addonInformers.Informer()). + WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + // in hosted mode, need get the addon namespace from the AddonNamespaceLabel, because + // the namespaces of manifestWork and addon may be different. + // in default mode, the addon and manifestWork are in the cluster namespace. + if addonNamespace, ok := accessor.GetLabels()[addonapiv1alpha1.AddonNamespaceLabelKey]; ok { + return []string{fmt.Sprintf("%s/%s", addonNamespace, accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey])} + } + return []string{fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey])} + }, + func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if accessor.GetLabels() == nil { + return false + } + + // only watch the addon deploy/hook manifestWorks here. 
+				addonName, ok := accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey]
+				if !ok {
+					return false
+				}
+
+				if _, ok := c.agentAddons[addonName]; !ok {
+					return false
+				}
+
+				if strings.HasPrefix(accessor.GetName(), constants.DeployWorkNamePrefix(addonName)) ||
+					strings.HasPrefix(accessor.GetName(), constants.PreDeleteHookWorkName(addonName)) {
+					return true
+				}
+				return false
+			},
+			workInformers.Informer(),
+		).
+		WithSync(c.sync).ToController("addon-deploy-controller")
+}
+
+type addonDeploySyncer interface {
+	sync(ctx context.Context, syncCtx factory.SyncContext,
+		cluster *clusterv1.ManagedCluster,
+		addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error)
+}
+
+func (c *addonDeployController) getWorksByAddonFn(index string) func(addonName, addonNamespace string) ([]*workapiv1.ManifestWork, error) {
+	return func(addonName, addonNamespace string) ([]*workapiv1.ManifestWork, error) {
+		items, err := c.workIndexer.ByIndex(index, fmt.Sprintf("%s/%s", addonNamespace, addonName))
+		if err != nil {
+			return nil, err
+		}
+		ret := make([]*workapiv1.ManifestWork, 0, len(items))
+		for _, item := range items {
+			ret = append(ret, item.(*workapiv1.ManifestWork))
+		}
+
+		return ret, nil
+	}
+}
+
+func (c *addonDeployController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
+	clusterName, addonName, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		// ignore any addon whose key is not in the format namespace/name
+		return nil
+	}
+
+	agentAddon, ok := c.agentAddons[addonName]
+	if !ok {
+		return nil
+	}
+
+	addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName)
+	if errors.IsNotFound(err) {
+		// need to find a way to clean up the cache by addon
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// only deploy agents once the RegistrationApplied condition is present.
+	if meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied) == nil {
+		return nil
+	}
+
+	cluster, err := c.managedClusterLister.Get(clusterName)
+	if errors.IsNotFound(err) {
+		// the managedCluster is nil in this case, and sync cannot handle a nil managedCluster.
+		// TODO: consider force deleting the addon and its deploy manifestWorks.
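+		// until then, returning nil simply leaves the addon and its manifestWorks untouched.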
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	syncers := []addonDeploySyncer{
+		&defaultSyncer{
+			buildWorks:     c.buildDeployManifestWorks,
+			applyWork:      c.applyWork,
+			getWorkByAddon: c.getWorksByAddonFn(index.ManifestWorkByAddon),
+			deleteWork:     c.workApplier.Delete,
+			agentAddon:     agentAddon,
+		},
+		&hostedSyncer{
+			buildWorks:     c.buildDeployManifestWorks,
+			applyWork:      c.applyWork,
+			deleteWork:     c.workApplier.Delete,
+			getCluster:     c.managedClusterLister.Get,
+			getWorkByAddon: c.getWorksByAddonFn(index.ManifestWorkByHostedAddon),
+			agentAddon:     agentAddon},
+		&defaultHookSyncer{
+			buildWorks: c.buildHookManifestWork,
+			applyWork:  c.applyWork,
+			agentAddon: agentAddon},
+		&hostedHookSyncer{
+			buildWorks:     c.buildHookManifestWork,
+			applyWork:      c.applyWork,
+			deleteWork:     c.workApplier.Delete,
+			getCluster:     c.managedClusterLister.Get,
+			getWorkByAddon: c.getWorksByAddonFn(index.ManifestWorkHookByHostedAddon),
+			agentAddon:     agentAddon},
+		&healthCheckSyncer{
+			getWorkByAddon: c.getWorksByAddonFn(index.ManifestWorkByAddon),
+			agentAddon:     agentAddon,
+		},
+	}
+
+	oldAddon := addon
+	addon = addon.DeepCopy()
+	var errs []error
+	for _, s := range syncers {
+		var err error
+		addon, err = s.sync(ctx, syncCtx, cluster, addon)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if err = c.updateAddon(ctx, addon, oldAddon); err != nil {
+		return err
+	}
+	return errorsutil.NewAggregate(errs)
+}
+
+// updateAddon updates the finalizers and conditions of the addon.
+// to avoid conflicts, updateAddon updates the finalizers first if they have changed.
+func (c *addonDeployController) updateAddon(ctx context.Context, new, old *addonapiv1alpha1.ManagedClusterAddOn) error {
+	if !equality.Semantic.DeepEqual(new.GetFinalizers(), old.GetFinalizers()) {
+		_, err := c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Update(ctx, new, metav1.UpdateOptions{})
+		return err
+	}
+
+	if equality.Semantic.DeepEqual(new.Status.HealthCheck, old.Status.HealthCheck) &&
+		equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) {
+		return nil
+	}
+
+	oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{
+		Status: addonapiv1alpha1.ManagedClusterAddOnStatus{
+			HealthCheck: old.Status.HealthCheck,
+			Conditions:  old.Status.Conditions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:             new.UID,
+			ResourceVersion: new.ResourceVersion,
+		},
+		Status: addonapiv1alpha1.ManagedClusterAddOnStatus{
+			HealthCheck: new.Status.HealthCheck,
+			Conditions:  new.Status.Conditions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
+	if err != nil {
+		return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err)
+	}
+
+	klog.V(2).Infof("Patching addon %s/%s condition with %s", new.Namespace, new.Name, string(patchBytes))
+	_, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(
+		ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
+
+func (c *addonDeployController) applyWork(ctx context.Context, appliedType string,
+	work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) {
+
+	work, err := c.workApplier.Apply(ctx, work)
+	if err != nil {
+		meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+			Type:   appliedType,
+			Status: metav1.ConditionFalse,
+			Reason:
addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to apply manifestWork: %v", err), + }) + return work, err + } + + // Update addon status based on work's status + WorkAppliedCond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkApplied) + switch { + case WorkAppliedCond == nil: + return work, nil + case WorkAppliedCond.Status == metav1.ConditionTrue: + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, + Message: "manifests of addon are applied successfully", + }) + default: + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplyFailed, + Message: "failed to apply the manifests of addon", + }) + } + + return work, nil +} + +func (c *addonDeployController) buildDeployManifestWorks(installMode, workNamespace string, + cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, + addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) { + var appliedType string + var addonWorkBuilder *addonWorksBuilder + + agentAddon := c.agentAddons[addon.Name] + if agentAddon == nil { + return nil, nil, fmt.Errorf("failed to get agentAddon") + } + + switch installMode { + case constants.InstallModeHosted: + appliedType = addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied + addonWorkBuilder = newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + case constants.InstallModeDefault: + appliedType = addonapiv1alpha1.ManagedClusterAddOnManifestApplied + addonWorkBuilder = newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + default: + return nil, nil, fmt.Errorf("invalid install mode %v", installMode) + } + + objects, err := agentAddon.Manifests(cluster, addon) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), + }) + return nil, nil, err + } + if len(objects) == 0 { + return nil, nil, nil + } + + manifestOptions := getManifestConfigOption(agentAddon) + existingWorksCopy := []workapiv1.ManifestWork{} + for _, work := range existingWorks { + existingWorksCopy = append(existingWorksCopy, *work) + } + appliedWorks, deleteWorks, err = addonWorkBuilder.BuildDeployWorks(workNamespace, addon, existingWorksCopy, objects, manifestOptions) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to build manifestwork: %v", err), + }) + return nil, nil, err + } + return appliedWorks, deleteWorks, nil +} +func (c *addonDeployController) buildHookManifestWork(installMode, workNamespace string, + cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) { + var appliedType string + var addonWorkBuilder *addonWorksBuilder + + agentAddon := c.agentAddons[addon.Name] + if agentAddon == nil { + return nil, fmt.Errorf("failed to get agentAddon") + } + + switch installMode 
{ + case constants.InstallModeHosted: + appliedType = addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied + addonWorkBuilder = newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + case constants.InstallModeDefault: + appliedType = addonapiv1alpha1.ManagedClusterAddOnManifestApplied + addonWorkBuilder = newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + default: + return nil, fmt.Errorf("invalid install mode %v", installMode) + } + + objects, err := agentAddon.Manifests(cluster, addon) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), + }) + return nil, err + } + if len(objects) == 0 { + return nil, nil + } + + hookWork, err := addonWorkBuilder.BuildHookWork(workNamespace, addon, objects) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to build manifestwork: %v", err), + }) + return nil, err + } + return hookWork, nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go new file mode 100644 index 000000000..bcca12205 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go @@ -0,0 +1,77 @@ +package agentdeploy + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +type defaultHookSyncer struct { + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + applyWork func(ctx context.Context, appliedType string, + work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + agentAddon agent.AgentAddon +} + +func (s *defaultHookSyncer) sync(ctx context.Context, + syncCtx factory.SyncContext, + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { + deployWorkNamespace := addon.Namespace + + hookWork, err := s.buildWorks(constants.InstallModeDefault, deployWorkNamespace, cluster, addon) + if err != nil { + return addon, err + } + + if hookWork == nil { + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) + return addon, nil + } + + if addonAddFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) { + return addon, nil + } + + if addon.DeletionTimestamp.IsZero() { + return addon, nil + } + + // will deploy the pre-delete hook manifestWork when the addon is deleting + hookWork, err = s.applyWork(ctx, 
addonapiv1alpha1.ManagedClusterAddOnManifestApplied, hookWork, addon) + if err != nil { + return addon, err + } + + // TODO: will surface more message here + if hookWorkIsCompleted(hookWork) { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, + Status: metav1.ConditionTrue, + Reason: "HookManifestIsCompleted", + Message: fmt.Sprintf("hook manifestWork %v is completed.", hookWork.Name), + }) + + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) + return addon, nil + } + + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, + Status: metav1.ConditionFalse, + Reason: "HookManifestIsNotCompleted", + Message: fmt.Sprintf("hook manifestWork %v is not completed.", hookWork.Name), + }) + + return addon, nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go new file mode 100644 index 000000000..5ffa6bd4d --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go @@ -0,0 +1,73 @@ +package agentdeploy + +import ( + "context" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +type defaultSyncer struct { + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, + addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) + + applyWork func(ctx context.Context, appliedType string, + work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + + getWorkByAddon func(addonName, addonNamespace string) ([]*workapiv1.ManifestWork, error) + + deleteWork func(ctx context.Context, workNamespace, workName string) error + + agentAddon agent.AgentAddon +} + +func (s *defaultSyncer) sync(ctx context.Context, + syncCtx factory.SyncContext, + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { + deployWorkNamespace := addon.Namespace + + var errs []error + + if !addon.DeletionTimestamp.IsZero() { + return addon, nil + } + + // waiting for the addon to be deleted when cluster is deleting. + // TODO: consider to delete addon in this scenario. 
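+	// returning early keeps the existing deploy works untouched while the cluster drains.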
+ if !cluster.DeletionTimestamp.IsZero() { + return addon, nil + } + + currentWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace) + if err != nil { + return addon, err + } + + deployWorks, deleteWorks, err := s.buildWorks(constants.InstallModeDefault, deployWorkNamespace, cluster, currentWorks, addon) + if err != nil { + return addon, err + } + + for _, deleteWork := range deleteWorks { + err = s.deleteWork(ctx, deployWorkNamespace, deleteWork.Name) + if err != nil { + errs = append(errs, err) + } + } + + for _, deployWork := range deployWorks { + _, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnManifestApplied, deployWork, addon) + if err != nil { + errs = append(errs, err) + } + } + + return addon, utilerrors.NewAggregate(errs) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go new file mode 100644 index 000000000..97ffe032f --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go @@ -0,0 +1,172 @@ +package agentdeploy + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +type healthCheckSyncer struct { + getWorkByAddon func(addonName, addonNamespace string) ([]*workapiv1.ManifestWork, error) + agentAddon agent.AgentAddon +} + +func (s *healthCheckSyncer) sync(ctx context.Context, + syncCtx factory.SyncContext, + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { + // reconcile health check mode + var expectedHealthCheckMode addonapiv1alpha1.HealthCheckMode + + if s.agentAddon.GetAgentAddonOptions().HealthProber == nil { + return addon, nil + } + + switch s.agentAddon.GetAgentAddonOptions().HealthProber.Type { + case agent.HealthProberTypeWork, agent.HealthProberTypeNone: + expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeCustomized + case agent.HealthProberTypeLease: + expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeLease + default: + expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeLease + } + + if expectedHealthCheckMode != addon.Status.HealthCheck.Mode { + addon.Status.HealthCheck.Mode = expectedHealthCheckMode + } + + err := s.probeAddonStatus(addon) + return addon, err +} + +func (s *healthCheckSyncer) probeAddonStatus(addon *addonapiv1alpha1.ManagedClusterAddOn) error { + if s.agentAddon.GetAgentAddonOptions().HealthProber.Type != agent.HealthProberTypeWork { + return nil + } + + if s.agentAddon.GetAgentAddonOptions().HealthProber.WorkProber == nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkApply, + Message: "Addon manifestWork is applied", + }) + return nil + } + + // update Available condition after addon manifestWorks are applied + if 
meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) == nil {
+		return nil
+	}
+
+	addonWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace)
+	if err != nil || len(addonWorks) == 0 {
+		meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+			Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+			Status:  metav1.ConditionUnknown,
+			Reason:  addonapiv1alpha1.AddonAvailableReasonWorkNotFound,
+			Message: "Addon manifestWork is not found",
+		})
+		return err
+	}
+
+	manifestConditions := []workapiv1.ManifestCondition{}
+	for _, work := range addonWorks {
+		if !strings.HasPrefix(work.Name, constants.DeployWorkNamePrefix(addon.Name)) {
+			continue
+		}
+		// Check the overall work Available condition first.
+		workCond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable)
+		switch {
+		case workCond == nil:
+			meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+				Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+				Status:  metav1.ConditionUnknown,
+				Reason:  addonapiv1alpha1.AddonAvailableReasonWorkNotApply,
+				Message: "Addon manifestWork is not applied yet",
+			})
+			return nil
+		case workCond.Status == metav1.ConditionFalse:
+			meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+				Type:    addonapiv1alpha1.ManagedClusterAddOnConditionAvailable,
+				Status:  metav1.ConditionFalse,
+				Reason:  addonapiv1alpha1.AddonAvailableReasonWorkNotApply,
+				Message: workCond.Message,
+			})
+			return nil
+		}
+
+		manifestConditions = append(manifestConditions, work.Status.ResourceStatus.Manifests...)
+	}
+
+	probeFields := s.agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.ProbeFields
+
+	for _, field := range probeFields {
+		result := findResultByIdentifier(field.ResourceIdentifier, manifestConditions)
+		// if no result is returned, it is possible that the work agent has not yet reported the feedback value;
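+		// (the work agent only reports feedback for resources listed in the work's
+		// feedback rules, so a missing result is expected shortly after deployment;)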
+ // mark condition to unknown + if result == nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionUnknown, + Reason: addonapiv1alpha1.AddonAvailableReasonNoProbeResult, + Message: "Probe results are not returned", + }) + return nil + } + + err := s.agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.HealthCheck(field.ResourceIdentifier, *result) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeUnavailable, + Message: fmt.Sprintf("Probe addon unavailable with err %v", err), + }) + return nil + } + } + + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "Addon is available", + }) + return nil +} + +func findResultByIdentifier(identifier workapiv1.ResourceIdentifier, manifestConditions []workapiv1.ManifestCondition) *workapiv1.StatusFeedbackResult { + for _, status := range manifestConditions { + if identifier.Group != status.ResourceMeta.Group { + continue + } + if identifier.Resource != status.ResourceMeta.Resource { + continue + } + if identifier.Name != status.ResourceMeta.Name { + continue + } + if identifier.Namespace != status.ResourceMeta.Namespace { + continue + } + + if len(status.StatusFeedbacks.Values) == 0 { + return nil + } + + return &status.StatusFeedbacks + } + + return nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go new file mode 100644 index 000000000..9f628166b --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go @@ -0,0 +1,150 @@ +package agentdeploy + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +type hostedHookSyncer struct { + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + + applyWork func(ctx context.Context, appliedType string, + work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + + deleteWork func(ctx context.Context, workNamespace, workName string) error + + getWorkByAddon func(addonName, addonNamespace string) ([]*workapiv1.ManifestWork, error) + + getCluster func(clusterName string) (*clusterv1.ManagedCluster, error) + + agentAddon agent.AgentAddon +} + +func (s *hostedHookSyncer) sync(ctx context.Context, + syncCtx factory.SyncContext, + cluster *clusterv1.ManagedCluster, + addon 
*addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { + + // Hosted mode is not enabled, will not deploy any resource on the hosting cluster + if !s.agentAddon.GetAgentAddonOptions().HostedModeEnabled { + return addon, nil + } + + installMode, hostingClusterName := constants.GetHostedModeInfo(addon.GetAnnotations()) + if installMode != constants.InstallModeHosted { + return addon, nil + } + + // Get Hosting Cluster, check whether the hosting cluster is a managed cluster of the hub + // TODO: check whether the hosting cluster of the addon is the same hosting cluster of the klusterlet + hostingCluster, err := s.getCluster(hostingClusterName) + if errors.IsNotFound(err) { + if err = s.cleanupHookWork(ctx, addon); err != nil { + return addon, err + } + + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) + return addon, nil + } + if err != nil { + return addon, err + } + + if !hostingCluster.DeletionTimestamp.IsZero() { + if err = s.cleanupHookWork(ctx, addon); err != nil { + return addon, err + } + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) + return addon, nil + } + hookWork, err := s.buildWorks(constants.InstallModeHosted, hostingClusterName, cluster, addon) + if err != nil { + return addon, err + } + + if hookWork == nil { + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) + return addon, nil + } + + // will deploy the pre-delete hook manifestWork when the addon is deleting + if addon.DeletionTimestamp.IsZero() { + addonAddFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) + return addon, nil + } + + // the hook work is completed if there is no HostingPreDeleteHookFinalizer when the addon is deleting. 
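+ // In effect, the deletion flow here is a small state machine: while the addon is alive the hook finalizer is kept set (addonAddFinalizer above); once the addon is deleting, the hook manifestWork is applied on the hosting cluster; and only after hookWorkIsCompleted reports completion are the hook works cleaned up and the finalizer dropped, so the pre-delete hook runs before the addon is removed.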
+ if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { + return addon, nil + } + + hookWork, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, hookWork, addon) + if err != nil { + return addon, err + } + + // TODO: will surface more message here + if hookWorkIsCompleted(hookWork) { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, + Status: metav1.ConditionTrue, + Reason: "HookManifestIsCompleted", + Message: fmt.Sprintf("hook manifestWork %v is completed.", hookWork.Name), + }) + + if err = s.cleanupHookWork(ctx, addon); err != nil { + return addon, err + } + if addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { + return addon, err + } + return addon, nil + } + + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, + Status: metav1.ConditionFalse, + Reason: "HookManifestIsNotCompleted", + Message: fmt.Sprintf("hook manifestWork %v is not completed.", hookWork.Name), + }) + + return addon, nil + +} + +// cleanupHookWork will delete the hosting pre-delete hook manifestWork and remove the finalizer, +// if the hostingClusterName is empty, will try to find out the hosting cluster by manifestWork labels and do the cleanup +func (s *hostedHookSyncer) cleanupHookWork(ctx context.Context, + addon *addonapiv1alpha1.ManagedClusterAddOn) (err error) { + if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { + return nil + } + + currentWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace) + if err != nil { + return err + } + + var errs []error + for _, work := range currentWorks { + err = s.deleteWork(ctx, work.Namespace, work.Name) + if err != nil { + errs = append(errs, err) + } + } + + return utilerrors.NewAggregate(errs) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go new file mode 100644 index 000000000..4da04d225 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go @@ -0,0 +1,164 @@ +package agentdeploy + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +type hostedSyncer struct { + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, + addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) + + applyWork func(ctx context.Context, appliedType string, + work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + + deleteWork func(ctx context.Context, workNamespace, workName string) error + + getWorkByAddon func(addonName, addonNamespace 
string) ([]*workapiv1.ManifestWork, error) + + getCluster func(clusterName string) (*clusterv1.ManagedCluster, error) + + agentAddon agent.AgentAddon +} + +func (s *hostedSyncer) sync(ctx context.Context, + syncCtx factory.SyncContext, + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { + // Hosted mode is not enabled, will not deploy any resource on the hosting cluster + if !s.agentAddon.GetAgentAddonOptions().HostedModeEnabled { + return addon, nil + } + + installMode, hostingClusterName := constants.GetHostedModeInfo(addon.GetAnnotations()) + if installMode != constants.InstallModeHosted { + // the installMode is changed from hosted to default, cleanup the hosting resources + if err := s.cleanupDeployWork(ctx, addon); err != nil { + return addon, err + } + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) + return addon, nil + } + + // Get Hosting Cluster, check whether the hosting cluster is a managed cluster of the hub + // TODO: check whether the hosting cluster of the addon is the same hosting cluster of the klusterlet + hostingCluster, err := s.getCluster(hostingClusterName) + if errors.IsNotFound(err) { + if err = s.cleanupDeployWork(ctx, addon); err != nil { + return addon, err + } + + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHostingClusterValidity, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.HostingClusterValidityReasonInvalid, + Message: fmt.Sprintf("hosting cluster %s is not a managed cluster of the hub", hostingClusterName), + }) + + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) + return addon, nil + } + if err != nil { + return addon, err + } + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHostingClusterValidity, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.HostingClusterValidityReasonValid, + Message: fmt.Sprintf("hosting cluster %s is a managed cluster of the hub", hostingClusterName), + }) + + if !hostingCluster.DeletionTimestamp.IsZero() { + if err = s.cleanupDeployWork(ctx, addon); err != nil { + return addon, err + } + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) + return addon, nil + } + + if !addon.DeletionTimestamp.IsZero() { + // clean up the deploy work until the hook work is completed + if addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { + return addon, nil + } + + if err = s.cleanupDeployWork(ctx, addon); err != nil { + return addon, err + } + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) + return addon, nil + } + + if addonAddFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) { + return addon, nil + } + + // waiting for the addon to be deleted when cluster is deleting. + // TODO: consider to delete addon in this scenario. 
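+ // The deploy reconcile below is a plain three-step diff: list the manifestWorks currently labeled for this addon, rebuild the desired set from the agent manifests via buildWorks, then delete the stale works and apply the rest, aggregating errors so one failed work does not short-circuit the others.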
+ if !cluster.DeletionTimestamp.IsZero() { + return addon, nil + } + + currentWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace) + if err != nil { + return addon, err + } + + deployWorks, deleteWorks, err := s.buildWorks(constants.InstallModeHosted, hostingClusterName, cluster, currentWorks, addon) + if err != nil { + return addon, err + } + + var errs []error + for _, deleteWork := range deleteWorks { + err = s.deleteWork(ctx, deleteWork.Namespace, deleteWork.Name) + if err != nil { + errs = append(errs, err) + } + } + + for _, deployWork := range deployWorks { + _, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, deployWork, addon) + if err != nil { + errs = append(errs, err) + } + } + + return addon, utilerrors.NewAggregate(errs) +} + +// cleanupDeployWork will delete the hosting manifestWork and cache. If the hostingClusterName is empty, it will try +// to find out the hosting cluster by manifestWork labels and do the cleanup. +func (s *hostedSyncer) cleanupDeployWork(ctx context.Context, + addon *addonapiv1alpha1.ManagedClusterAddOn) (err error) { + if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) { + return nil + } + + currentWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace) + if err != nil { + return err + } + + var errs []error + for _, work := range currentWorks { + err = s.deleteWork(ctx, work.Namespace, work.Name) + if err != nil { + errs = append(errs, err) + } + } + + return utilerrors.NewAggregate(errs) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go new file mode 100644 index 000000000..b550cae6e --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go @@ -0,0 +1,570 @@ +package agentdeploy + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + "open-cluster-management.io/api/utils/work/v1/workbuilder" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" +) + +func addonHasFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer string) bool { + for _, f := range addon.Finalizers { + if f == finalizer { + return true + } + } + return false +} + +func addonRemoveFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer string) bool { + var rst []string + for _, f := range addon.Finalizers { + if f == finalizer { + continue + } + // remove deprecated finalizers also + if f == addonapiv1alpha1.AddonDeprecatedHostingManifestFinalizer || + f == addonapiv1alpha1.AddonDeprecatedPreDeleteHookFinalizer || + f == addonapiv1alpha1.AddonDeprecatedHostingPreDeleteHookFinalizer { + continue + } + rst = append(rst, f) + } + if len(rst) != len(addon.Finalizers) { + addon.SetFinalizers(rst) + return true + } + return false +} + +func addonAddFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer string) bool { + if addon.Finalizers == nil { + addon.SetFinalizers([]string{finalizer}) + return true + } + + var rst []string + for _, f := range addon.Finalizers { + // remove deprecated
finalizers also + if f == addonapiv1alpha1.AddonDeprecatedHostingManifestFinalizer || + f == addonapiv1alpha1.AddonDeprecatedPreDeleteHookFinalizer || + f == addonapiv1alpha1.AddonDeprecatedHostingPreDeleteHookFinalizer { + continue + } + rst = append(rst, f) + } + + for _, f := range addon.Finalizers { + if f == finalizer { + return false + } + } + + rst = append(rst, finalizer) + addon.SetFinalizers(rst) + return true +} + +func newManifestWork(addonNamespace, addonName, clusterName string, manifests []workapiv1.Manifest, + manifestWorkNameFunc func(addonNamespace, addonName string) string) *workapiv1.ManifestWork { + if len(manifests) == 0 { + return nil + } + + work := &workapiv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: manifestWorkNameFunc(addonNamespace, addonName), + Namespace: clusterName, + Labels: map[string]string{ + addonapiv1alpha1.AddonLabelKey: addonName, + }, + }, + Spec: workapiv1.ManifestWorkSpec{ + Workload: workapiv1.ManifestsTemplate{ + Manifests: manifests, + }, + }, + } + + // if the addon namespace is not equal to the manifestwork namespace (cluster name), add the addon namespace label + if addonNamespace != clusterName { + work.Labels[addonapiv1alpha1.AddonNamespaceLabelKey] = addonNamespace + } + return work +} + +// isPreDeleteHookObject checks whether the object is a pre-delete hook resource. +// currently, we only support job and pod as hook resources. +// we use WellKnownStatus here to get the job/pod status fields to check if the job/pod is completed. +func (b *addonWorksBuilder) isPreDeleteHookObject(obj runtime.Object) (bool, *workapiv1.ManifestConfigOption) { + var resource string + gvk := obj.GetObjectKind().GroupVersionKind() + switch gvk.Kind { + case "Job": + resource = "jobs" + case "Pod": + resource = "pods" + default: + return false, nil + } + + accessor, err := meta.Accessor(obj) + if err != nil { + return false, nil + } + + labels := accessor.GetLabels() + annotations := accessor.GetAnnotations() + + // TODO: deprecate PreDeleteHookLabel in the future release.
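+ // For illustration: an agent addon turns a rendered Job into a pre-delete hook simply by carrying one of the markers checked below, e.g. an annotation keyed by addonapiv1alpha1.AddonPreDeleteHookAnnotationKey in its metadata. + // The WellKnownStatus feedback rule in the returned config option is what later lets hookWorkIsCompleted read the job/pod status back from the work agent.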
+ _, hasPreDeleteLabel := labels[addonapiv1alpha1.AddonPreDeleteHookLabelKey] + _, hasPreDeleteAnnotation := annotations[addonapiv1alpha1.AddonPreDeleteHookAnnotationKey] + if !hasPreDeleteLabel && !hasPreDeleteAnnotation { + return false, nil + } + + return true, &workapiv1.ManifestConfigOption{ + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: gvk.Group, + Resource: resource, + Name: accessor.GetName(), + Namespace: accessor.GetNamespace(), + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.WellKnownStatusType, + }, + }, + } +} +func newAddonWorksBuilder(hostedModeEnabled bool, workBuilder *workbuilder.WorkBuilder) *addonWorksBuilder { + return &addonWorksBuilder{ + processor: &managedManifest{}, + hostedModeEnabled: hostedModeEnabled, + workBuilder: workBuilder, + } +} + +func newHostingAddonWorksBuilder(hostedModeEnabled bool, workBuilder *workbuilder.WorkBuilder) *addonWorksBuilder { + return &addonWorksBuilder{ + processor: &hostingManifest{}, + hostedModeEnabled: hostedModeEnabled, + workBuilder: workBuilder, + } +} + +type addonWorksBuilder struct { + processor manifestProcessor + hostedModeEnabled bool + workBuilder *workbuilder.WorkBuilder +} + +type manifestProcessor interface { + deployable(hostedModeEnabled bool, installMode string, obj runtime.Object) (bool, error) + manifestWorkNamePrefix(addonNamespace, addonName string) string + preDeleteHookManifestWorkName(addonNamespace, addonName string) string +} + +// hostingManifest processes manifests which will be deployed on the hosting cluster +type hostingManifest struct { +} + +func (m *hostingManifest) deployable(hostedModeEnabled bool, installMode string, obj runtime.Object) (bool, error) { + if !hostedModeEnabled { + // hosted mode disabled, will not deploy any resource on the hosting cluster + return false, nil + } + + accessor, err := meta.Accessor(obj) + if err != nil { + return false, nil + } + + location, exist, err := constants.GetHostedManifestLocation(accessor.GetLabels(), accessor.GetAnnotations()) + if err != nil { + return false, err + } + if installMode != constants.InstallModeHosted { + return false, nil + } + + if exist && location == addonapiv1alpha1.HostedManifestLocationHostingValue { + klog.V(4).Infof("will deploy the manifest %s/%s on the hosting cluster in Hosted mode", + accessor.GetNamespace(), accessor.GetName()) + return true, nil + } + + return false, nil +} + +func (m *hostingManifest) manifestWorkNamePrefix(addonNamespace, addonName string) string { + return constants.DeployHostingWorkNamePrefix(addonNamespace, addonName) +} + +func (m *hostingManifest) preDeleteHookManifestWorkName(addonNamespace, addonName string) string { + return constants.PreDeleteHookHostingWorkName(addonNamespace, addonName) +} + +// managedManifest processes manifests which will be deployed on the managed cluster +type managedManifest struct { +} + +func (m *managedManifest) deployable(hostedModeEnabled bool, installMode string, obj runtime.Object) (bool, error) { + if !hostedModeEnabled { + // hosted mode disabled, will deploy all resources on the managed cluster + return true, nil + } + + accessor, err := meta.Accessor(obj) + if err != nil { + return false, nil + } + + location, exist, err := constants.GetHostedManifestLocation(accessor.GetLabels(), accessor.GetAnnotations()) + if err != nil { + return false, err + } + + if installMode != constants.InstallModeHosted { + return true, nil + } + + if !exist || location == addonapiv1alpha1.HostedManifestLocationManagedValue { + klog.V(4).Infof("will
deploy the manifest %s/%s on the managed cluster in Hosted mode", + accessor.GetNamespace(), accessor.GetName()) + return true, nil + } + + return false, nil +} + +func (m *managedManifest) manifestWorkNamePrefix(addonNamespace, addonName string) string { + return constants.DeployWorkNamePrefix(addonName) +} + +func (m *managedManifest) preDeleteHookManifestWorkName(addonNamespace, addonName string) string { + return constants.PreDeleteHookWorkName(addonName) +} + +// BuildDeployWorks returns the deploy manifestWorks. If there are no manifests +// to deploy, it returns nil. +func (b *addonWorksBuilder) BuildDeployWorks(addonWorkNamespace string, + addon *addonapiv1alpha1.ManagedClusterAddOn, + existingWorks []workapiv1.ManifestWork, + objects []runtime.Object, + manifestOptions []workapiv1.ManifestConfigOption) (deployWorks, deleteWorks []*workapiv1.ManifestWork, err error) { + var deployObjects []runtime.Object + var owner *metav1.OwnerReference + installMode, _ := constants.GetHostedModeInfo(addon.GetAnnotations()) + + // This owner is only added to the manifestWork deployed in managed cluster ns. + // the manifestWork in managed cluster ns is cleaned up via the addon ownerRef, so the owner needs to be added. + // the manifestWork in hosting cluster ns is cleaned up by its controller since it and its addon cross ns. + owner = metav1.NewControllerRef(addon, addonapiv1alpha1.GroupVersion.WithKind("ManagedClusterAddOn")) + + var deletionOrphaningRules []workapiv1.OrphaningRule + for _, object := range objects { + deployable, err := b.processor.deployable(b.hostedModeEnabled, installMode, object) + if err != nil { + return nil, nil, err + } + if !deployable { + continue + } + + isHookObject, _ := b.isPreDeleteHookObject(object) + if isHookObject { + continue + } + + rule, err := getDeletionOrphaningRule(object) + if err != nil { + return nil, nil, err + } + if rule != nil { + deletionOrphaningRules = append(deletionOrphaningRules, *rule) + } + + deployObjects = append(deployObjects, object) + } + if len(deployObjects) == 0 { + return nil, nil, nil + } + + var deletionOption *workapiv1.DeleteOption + if len(deletionOrphaningRules) != 0 { + deletionOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: deletionOrphaningRules, + }, + } + } + + annotations, err := configsToAnnotations(addon.Status.ConfigReferences) + if err != nil { + return nil, nil, err + } + + return b.workBuilder.Build(deployObjects, + newAddonWorkObjectMeta(b.processor.manifestWorkNamePrefix(addon.Namespace, addon.Name), addon.Name, addon.Namespace, addonWorkNamespace, owner), + workbuilder.ExistingManifestWorksOption(existingWorks), + workbuilder.ManifestConfigOption(manifestOptions), + workbuilder.ManifestAnnotations(annotations), + workbuilder.DeletionOption(deletionOption)) +} + +// BuildHookWork returns the preDelete manifestWork. If there are no manifests +// to deploy, it returns nil. +func (b *addonWorksBuilder) BuildHookWork(addonWorkNamespace string, + addon *addonapiv1alpha1.ManagedClusterAddOn, + objects []runtime.Object) (hookWork *workapiv1.ManifestWork, err error) { + var hookManifests []workapiv1.Manifest + var hookManifestConfigs []workapiv1.ManifestConfigOption + var owner *metav1.OwnerReference + installMode, _ := constants.GetHostedModeInfo(addon.GetAnnotations()) + + // only set the addon as the owner of works in default mode; the owner must not be set in hosted mode.
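+ // Kubernetes garbage collection does not follow owner references across namespaces: an ownerRef from a work in the hosting cluster namespace to an addon in the managed cluster namespace would be invalid, which is why hosted mode relies on the finalizer-based cleanup instead of an ownerRef.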
+ if installMode == constants.InstallModeDefault { + owner = metav1.NewControllerRef(addon, addonapiv1alpha1.GroupVersion.WithKind("ManagedClusterAddOn")) + } + + for _, object := range objects { + deployable, err := b.processor.deployable(b.hostedModeEnabled, installMode, object) + if err != nil { + return nil, err + } + if !deployable { + continue + } + + isHookObject, manifestConfig := b.isPreDeleteHookObject(object) + if !isHookObject { + continue + } + rawObject, err := runtime.Encode(unstructured.UnstructuredJSONScheme, object) + if err != nil { + return nil, err + } + + hookManifests = append(hookManifests, workapiv1.Manifest{RawExtension: runtime.RawExtension{Raw: rawObject}}) + hookManifestConfigs = append(hookManifestConfigs, *manifestConfig) + } + if len(hookManifests) == 0 { + return nil, nil + } + + hookWork = newManifestWork(addon.Namespace, addon.Name, addonWorkNamespace, hookManifests, b.processor.preDeleteHookManifestWorkName) + if owner != nil { + hookWork.OwnerReferences = []metav1.OwnerReference{*owner} + } + hookWork.Spec.ManifestConfigs = hookManifestConfigs + if addon.Namespace != addonWorkNamespace { + hookWork.Labels[addonapiv1alpha1.AddonNamespaceLabelKey] = addon.Namespace + } + return hookWork, nil +} + +func FindManifestValue( + resourceStatus workapiv1.ManifestResourceStatus, + identifier workapiv1.ResourceIdentifier, + valueName string) workapiv1.FieldValue { + for _, manifest := range resourceStatus.Manifests { + values := manifest.StatusFeedbacks.Values + if len(values) == 0 { + return workapiv1.FieldValue{} + } + resourceMeta := manifest.ResourceMeta + if identifier.Group == resourceMeta.Group && + identifier.Resource == resourceMeta.Resource && + identifier.Name == resourceMeta.Name && + identifier.Namespace == resourceMeta.Namespace { + for _, v := range values { + if v.Name == valueName { + return v.Value + } + } + } + } + return workapiv1.FieldValue{} +} + +// hookWorkIsCompleted checks whether the hook resources are completed. +// hookManifestWork is completed if all resources are completed. +// currently, we only support job and pod as hook manifests. +// a job is completed if the Completed condition of its status is true. +// a pod is completed if the phase of its status is Succeeded.
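+// For illustration, with the WellKnownStatus feedback rules attached by isPreDeleteHookObject, the work agent reports a value named "JobComplete" (String "True" once the job's Completed condition holds) and a value named "PodPhase" (String "Succeeded" for a finished pod); these are exactly the two values read back through FindManifestValue below.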
+func hookWorkIsCompleted(hookWork *workapiv1.ManifestWork) bool { + if hookWork == nil { + return false + } + if !meta.IsStatusConditionTrue(hookWork.Status.Conditions, workapiv1.WorkAvailable) { + return false + } + + if len(hookWork.Spec.ManifestConfigs) == 0 { + klog.Errorf("the hook manifestWork should have manifest configs, but got 0.") + return false + } + for _, manifestConfig := range hookWork.Spec.ManifestConfigs { + switch manifestConfig.ResourceIdentifier.Resource { + case "jobs": + value := FindManifestValue(hookWork.Status.ResourceStatus, manifestConfig.ResourceIdentifier, "JobComplete") + if value.Type == "" { + return false + } + if value.String == nil { + return false + } + if *value.String != "True" { + return false + } + + case "pods": + value := FindManifestValue(hookWork.Status.ResourceStatus, manifestConfig.ResourceIdentifier, "PodPhase") + if value.Type == "" { + return false + } + if value.String == nil { + return false + } + if *value.String != "Succeeded" { + return false + } + default: + return false + } + } + + return true +} + +func newAddonWorkObjectMeta(namePrefix, addonName, addonNamespace, workNamespace string, + owner *metav1.OwnerReference) workbuilder.GenerateManifestWorkObjectMeta { + return func(index int) metav1.ObjectMeta { + objectMeta := metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", namePrefix, index), + Namespace: workNamespace, + Labels: map[string]string{ + addonapiv1alpha1.AddonLabelKey: addonName, + }, + } + // This owner is only added to the manifestWork deployed in managed cluster ns. + // the manifestWork in managed cluster ns is cleaned up via the addon ownerRef, so the owner needs to be added. + // the manifestWork in hosting cluster ns is cleaned up by its controller since it and its addon cross ns. + if addonNamespace == workNamespace && owner != nil { + objectMeta.OwnerReferences = []metav1.OwnerReference{*owner} + } + // if the addon namespace is not equal to the manifestwork namespace (cluster name), add the addon namespace label + if addonNamespace != workNamespace { + objectMeta.Labels[addonapiv1alpha1.AddonNamespaceLabelKey] = addonNamespace + } + return objectMeta + } +} + +func getManifestConfigOption(agentAddon agent.AgentAddon) []workapiv1.ManifestConfigOption { + manifestConfigs := []workapiv1.ManifestConfigOption{} + + if agentAddon.GetAgentAddonOptions().HealthProber != nil && + agentAddon.GetAgentAddonOptions().HealthProber.Type == agent.HealthProberTypeWork && + agentAddon.GetAgentAddonOptions().HealthProber.WorkProber != nil { + probeRules := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.ProbeFields + for _, rule := range probeRules { + manifestConfigs = append(manifestConfigs, workapiv1.ManifestConfigOption{ + ResourceIdentifier: rule.ResourceIdentifier, + FeedbackRules: rule.ProbeRules, + }) + } + } + + if updaters := agentAddon.GetAgentAddonOptions().Updaters; updaters != nil { + for _, updater := range updaters { + manifestConfigs = append(manifestConfigs, workapiv1.ManifestConfigOption{ + ResourceIdentifier: updater.ResourceIdentifier, + UpdateStrategy: &updater.UpdateStrategy, + }) + } + } + + return manifestConfigs +} + +func getDeletionOrphaningRule(obj runtime.Object) (*workapiv1.OrphaningRule, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + annotations := accessor.GetAnnotations() + if _, ok := annotations[addonapiv1alpha1.DeletionOrphanAnnotationKey]; !ok { + return nil, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + plural, _ :=
meta.UnsafeGuessKindToResource(gvk) + + rule := &workapiv1.OrphaningRule{ + Group: plural.Group, + Resource: plural.Resource, + Name: accessor.GetName(), + Namespace: accessor.GetNamespace(), + } + return rule, nil +} + +// configsToAnnotations converts config references to annotations. +func configsToAnnotations(configReference []addonapiv1alpha1.ConfigReference) (map[string]string, error) { + if len(configReference) == 0 { + return nil, nil + } + + // converts the configReference into a map, key is config name, value is spec hash. + specHashMap := ConfigsToMap(configReference) + + // converts the map into a JSON byte string. + jsonBytes, err := json.Marshal(specHashMap) + if err != nil { + return nil, err + } + + // return a map with the key as "open-cluster-management.io/config-spec-hash" and the value as the JSON byte string. + // For example: + // open-cluster-management.io/config-spec-hash: '{"addonhubconfigs.addon.open-cluster-management.io//default":"613d134a2ec072a8a6451af913979f496d657ef5", + // "addondeploymentconfigs.addon.open-cluster-management.io/open-cluster-management/default":"cca7df9188fb920dcfab374940452393e2037619"}' + return map[string]string{ + workapiv1.ManifestConfigSpecHashAnnotationKey: string(jsonBytes), + }, nil +} + +// ConfigsToMap returns a map that stores the config name as the key and the config spec hash as the value. +func ConfigsToMap(configReference []addonapiv1alpha1.ConfigReference) map[string]string { + // config name follows the format of <resource>.<group>/<namespace>/<name>, for example, + // addondeploymentconfigs.addon.open-cluster-management.io/open-cluster-management/default. + // for a cluster scoped resource, the namespace would be empty, for example, + // addonhubconfigs.addon.open-cluster-management.io//default. + specHashMap := make(map[string]string, len(configReference)) + for _, v := range configReference { + if v.DesiredConfig == nil { + continue + } + resourceStr := v.Resource + if len(v.Group) > 0 { + resourceStr += fmt.Sprintf(".%s", v.Group) + } + resourceStr += fmt.Sprintf("/%s/%s", v.DesiredConfig.Namespace, v.DesiredConfig.Name) + + specHashMap[resourceStr] = v.DesiredConfig.SpecHash + } + + return specHashMap +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go new file mode 100644 index 000000000..214eb905f --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go @@ -0,0 +1,374 @@ +package certificate + +import ( + "context" + "fmt" + "strings" + + certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + certificatesinformers "k8s.io/client-go/informers/certificates/v1" + v1beta1certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1" + "k8s.io/client-go/kubernetes" + certificateslisters "k8s.io/client-go/listers/certificates/v1" + v1beta1certificateslisters "k8s.io/client-go/listers/certificates/v1beta1" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1
"open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +var ( + // EnableV1Beta1CSRCompatibility is a condition variable that enables/disables + // the compatibility with V1beta1 CSR api. If enabled, the CSR approver + // controller wil watch and approve over the V1beta1 CSR api instead of V1. + // Setting the variable to false will make the CSR signer controller strictly + // requires V1 CSR api. + // + // The distinction between V1 and V1beta1 CSR is that the latter doesn't have + // a "signerName" field which is used for discriminating external certificate + // signers. With that being said, under V1beta1 CSR api once a CSR object is + // approved, it will be immediately signed by the CSR signer controller from + // kube-controller-manager. So the csr signer controller will be permanently + // disabled to avoid conflict with Kubernetes' original CSR signer. + // + // TODO: Remove this condition gate variable after V1beta1 CSR api fades away + // in the Kubernetes community. The code blocks supporting V1beta1 CSR + // should also be removed. + EnableV1Beta1CSRCompatibility = true +) + +// csrApprovingController auto approve the renewal CertificateSigningRequests for an accepted spoke cluster on the hub. +type csrApprovingController struct { + kubeClient kubernetes.Interface + agentAddons map[string]agent.AgentAddon + managedClusterLister clusterlister.ManagedClusterLister + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + csrLister certificateslisters.CertificateSigningRequestLister + csrListerBeta v1beta1certificateslisters.CertificateSigningRequestLister +} + +// NewCSRApprovingController creates a new csr approving controller +func NewCSRApprovingController( + kubeClient kubernetes.Interface, + clusterInformers clusterinformers.ManagedClusterInformer, + csrV1Informer certificatesinformers.CertificateSigningRequestInformer, + csrBetaInformer v1beta1certificatesinformers.CertificateSigningRequestInformer, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + agentAddons map[string]agent.AgentAddon, +) factory.Controller { + if (csrV1Informer != nil) == (csrBetaInformer != nil) { + klog.Fatalf("V1 and V1beta1 CSR informer cannot be present or absent at the same time") + } + c := &csrApprovingController{ + kubeClient: kubeClient, + agentAddons: agentAddons, + managedClusterLister: clusterInformers.Lister(), + managedClusterAddonLister: addonInformers.Lister(), + } + var csrInformer cache.SharedIndexInformer + if csrV1Informer != nil { + c.csrLister = csrV1Informer.Lister() + csrInformer = csrV1Informer.Informer() + } + if EnableV1Beta1CSRCompatibility && csrBetaInformer != nil { + c.csrListerBeta = csrBetaInformer.Lister() + csrInformer = csrBetaInformer.Informer() + } + + return factory.New(). 
+ WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + return []string{accessor.GetName()} + }, + func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if !strings.HasPrefix(accessor.GetName(), "addon") { + return false + } + if len(accessor.GetLabels()) == 0 { + return false + } + addonName := accessor.GetLabels()[addonv1alpha1.AddonLabelKey] + if _, ok := agentAddons[addonName]; !ok { + return false + } + return true + }, + csrInformer). + WithSync(c.sync). + ToController("CSRApprovingController") +} + +func (c *csrApprovingController) sync(ctx context.Context, syncCtx factory.SyncContext, csrName string) error { + klog.V(4).Infof("Reconciling CertificateSigningRequests %q", csrName) + + csr, err := c.getCSR(csrName) + if csr == nil { + return nil + } + if err != nil { + return err + } + + if isCSRApproved(csr) || IsCSRInTerminalState(csr) { + return nil + } + + addonName := csr.GetLabels()[addonv1alpha1.AddonLabelKey] + agentAddon, ok := c.agentAddons[addonName] + if !ok { + return nil + } + + registrationOption := agentAddon.GetAgentAddonOptions().Registration + if registrationOption == nil { + return nil + } + clusterName, ok := csr.GetLabels()[clusterv1.ClusterNameLabelKey] + if !ok { + return nil + } + + // Get ManagedCluster + managedCluster, err := c.managedClusterLister.Get(clusterName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + // Get Addon + managedClusterAddon, err := c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + if registrationOption.CSRApproveCheck == nil { + klog.V(4).Infof("addon csr %q cannot be auto approved because the approve check is not defined", csr.GetName()) + return nil + } + + if err := c.approve(ctx, registrationOption, managedCluster, managedClusterAddon, csr); err != nil { + return err + } + + return nil +} + +func (c *csrApprovingController) getCSR(csrName string) (metav1.Object, error) { + // TODO: remove the following block for deprecating V1beta1 CSR compatibility + if EnableV1Beta1CSRCompatibility { + if c.csrListerBeta != nil { + csr, err := c.csrListerBeta.Get(csrName) + if errors.IsNotFound(err) { + return nil, nil + } + if err != nil { + return nil, err + } + return csr, nil + } + } + csr, err := c.csrLister.Get(csrName) + if errors.IsNotFound(err) { + return nil, nil + } + if err != nil { + return nil, err + } + return csr, nil +} + +func (c *csrApprovingController) approve( + ctx context.Context, + registrationOption *agent.RegistrationOption, + managedCluster *clusterv1.ManagedCluster, + managedClusterAddon *addonv1alpha1.ManagedClusterAddOn, + csr metav1.Object) error { + + switch t := csr.(type) { + case *certificatesv1.CertificateSigningRequest: + approve := registrationOption.CSRApproveCheck(managedCluster, managedClusterAddon, t) + if !approve { + klog.V(4).Infof("addon csr %q cannot be auto approved because the approve check fails", csr.GetName()) + return nil + } + return c.approveCSRV1(ctx, t) + // TODO: remove the following block for deprecating V1beta1 CSR compatibility + case *certificatesv1beta1.CertificateSigningRequest: + v1CSR := unsafeConvertV1beta1CSRToV1CSR(t) + approve := registrationOption.CSRApproveCheck(managedCluster, managedClusterAddon, v1CSR) + if !approve { + klog.V(4).Infof("addon csr %q cannot be auto approved because the approve check fails", csr.GetName()) + return nil + } + return
c.approveCSRV1Beta1(ctx, t) + default: + return fmt.Errorf("unknown csr object type: %T", csr) + } +} + +func (c *csrApprovingController) approveCSRV1(ctx context.Context, v1CSR *certificatesv1.CertificateSigningRequest) error { + v1CSR.Status.Conditions = append(v1CSR.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "AutoApprovedByHubCSRApprovingController", + Message: "Auto approving addon agent certificate.", + }) + _, err := c.kubeClient.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, v1CSR.GetName(), v1CSR, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} + +func (c *csrApprovingController) approveCSRV1Beta1(ctx context.Context, v1beta1CSR *certificatesv1beta1.CertificateSigningRequest) error { + v1beta1CSR.Status.Conditions = append(v1beta1CSR.Status.Conditions, certificatesv1beta1.CertificateSigningRequestCondition{ + Type: certificatesv1beta1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "AutoApprovedByHubCSRApprovingController", + Message: "Auto approving addon agent certificate.", + }) + _, err := c.kubeClient.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(ctx, v1beta1CSR, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} + +// IsCSRInTerminalState checks whether a CSR is in a terminal state +func IsCSRInTerminalState(csr metav1.Object) bool { + if v1CSR, ok := csr.(*certificatesv1.CertificateSigningRequest); ok { + for _, c := range v1CSR.Status.Conditions { + if c.Type == certificatesv1.CertificateApproved { + return true + } + if c.Type == certificatesv1.CertificateDenied { + return true + } + } + } + // TODO: remove the following block for deprecating V1beta1 CSR compatibility + if EnableV1Beta1CSRCompatibility { + if v1beta1CSR, ok := csr.(*certificatesv1beta1.CertificateSigningRequest); ok { + for _, c := range v1beta1CSR.Status.Conditions { + if c.Type == certificatesv1beta1.CertificateApproved { + return true + } + if c.Type == certificatesv1beta1.CertificateDenied { + return true + } + } + } + } + return false +} + +func isCSRApproved(csr metav1.Object) bool { + approved := false + if v1CSR, ok := csr.(*certificatesv1.CertificateSigningRequest); ok { + for _, condition := range v1CSR.Status.Conditions { + if condition.Type == certificatesv1.CertificateDenied { + return false + } else if condition.Type == certificatesv1.CertificateApproved { + approved = true + } + } + } + // TODO: remove the following block for deprecating V1beta1 CSR compatibility + if EnableV1Beta1CSRCompatibility { + if v1beta1CSR, ok := csr.(*certificatesv1beta1.CertificateSigningRequest); ok { + for _, condition := range v1beta1CSR.Status.Conditions { + if condition.Type == certificatesv1beta1.CertificateDenied { + return false + } else if condition.Type == certificatesv1beta1.CertificateApproved { + approved = true + } + } + } + } + return approved +} + +// TODO: remove the following block for deprecating V1beta1 CSR compatibility +func unsafeConvertV1beta1CSRToV1CSR(v1beta1CSR *certificatesv1beta1.CertificateSigningRequest) *certificatesv1.CertificateSigningRequest { + v1CSR := &certificatesv1.CertificateSigningRequest{ + TypeMeta: metav1.TypeMeta{ + APIVersion: certificatesv1.SchemeGroupVersion.String(), + Kind: "CertificateSigningRequest", + }, + ObjectMeta: *v1beta1CSR.ObjectMeta.DeepCopy(), + Spec: certificatesv1.CertificateSigningRequestSpec{ + Request: v1beta1CSR.Spec.Request, + ExpirationSeconds:
v1beta1CSR.Spec.ExpirationSeconds, + Usages: unsafeCovertV1beta1KeyUsageToV1KeyUsage(v1beta1CSR.Spec.Usages), + Username: v1beta1CSR.Spec.Username, + UID: v1beta1CSR.Spec.UID, + Groups: v1beta1CSR.Spec.Groups, + Extra: unsafeCovertV1beta1ExtraValueToV1ExtraValue(v1beta1CSR.Spec.Extra), + }, + Status: certificatesv1.CertificateSigningRequestStatus{ + Certificate: v1beta1CSR.Status.Certificate, + Conditions: unsafeCovertV1beta1ConditionsToV1Conditions(v1beta1CSR.Status.Conditions), + }, + } + if v1beta1CSR.Spec.SignerName != nil { + v1CSR.Spec.SignerName = *v1beta1CSR.Spec.SignerName + } + return v1CSR +} + +// TODO: remove the following block for deprecating V1beta1 CSR compatibility +func unsafeCovertV1beta1KeyUsageToV1KeyUsage(usages []certificatesv1beta1.KeyUsage) []certificatesv1.KeyUsage { + v1Usages := make([]certificatesv1.KeyUsage, len(usages)) + for i := range usages { + v1Usages[i] = certificatesv1.KeyUsage(usages[i]) + } + return v1Usages +} + +// TODO: remove the following block for deprecating V1beta1 CSR compatibility +func unsafeCovertV1beta1ExtraValueToV1ExtraValue( + extraValues map[string]certificatesv1beta1.ExtraValue) map[string]certificatesv1.ExtraValue { + v1Values := make(map[string]certificatesv1.ExtraValue) + for k := range extraValues { + v1Values[k] = certificatesv1.ExtraValue(extraValues[k]) + } + return v1Values +} + +// TODO: remove the following block for deprecating V1beta1 CSR compatibility +func unsafeCovertV1beta1ConditionsToV1Conditions( + conditions []certificatesv1beta1.CertificateSigningRequestCondition, +) []certificatesv1.CertificateSigningRequestCondition { + v1Conditions := make([]certificatesv1.CertificateSigningRequestCondition, len(conditions)) + for i := range conditions { + v1Conditions[i] = certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.RequestConditionType(conditions[i].Type), + Status: conditions[i].Status, + Reason: conditions[i].Reason, + Message: conditions[i].Message, + LastTransitionTime: conditions[i].LastTransitionTime, + LastUpdateTime: conditions[i].LastUpdateTime, + } + } + return v1Conditions +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go new file mode 100644 index 000000000..6c0a3d766 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go @@ -0,0 +1,147 @@ +package certificate + +import ( + "context" + "fmt" + "strings" + + certificatesv1 "k8s.io/api/certificates/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + certificatesinformers "k8s.io/client-go/informers/certificates/v1" + "k8s.io/client-go/kubernetes" + certificateslisters "k8s.io/client-go/listers/certificates/v1" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + 
"open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +// csrApprovingController auto approve the renewal CertificateSigningRequests for an accepted spoke cluster on the hub. +type csrSignController struct { + kubeClient kubernetes.Interface + agentAddons map[string]agent.AgentAddon + managedClusterLister clusterlister.ManagedClusterLister + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + csrLister certificateslisters.CertificateSigningRequestLister +} + +// NewCSRApprovingController creates a new csr approving controller +func NewCSRSignController( + kubeClient kubernetes.Interface, + clusterInformers clusterinformers.ManagedClusterInformer, + csrInformer certificatesinformers.CertificateSigningRequestInformer, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + agentAddons map[string]agent.AgentAddon, +) factory.Controller { + c := &csrSignController{ + kubeClient: kubeClient, + agentAddons: agentAddons, + managedClusterLister: clusterInformers.Lister(), + managedClusterAddonLister: addonInformers.Lister(), + csrLister: csrInformer.Lister(), + } + return factory.New(). + WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + return []string{accessor.GetName()} + }, + func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if !strings.HasPrefix(accessor.GetName(), "addon") { + return false + } + if len(accessor.GetLabels()) == 0 { + return false + } + addonName := accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey] + if _, ok := agentAddons[addonName]; !ok { + return false + } + return true + }, + csrInformer.Informer()). + WithSync(c.sync). + ToController("CSRApprovingController") +} + +func (c *csrSignController) sync(ctx context.Context, syncCtx factory.SyncContext, csrName string) error { + klog.V(4).Infof("Reconciling CertificateSigningRequests %q", csrName) + csr, err := c.csrLister.Get(csrName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + csr = csr.DeepCopy() + + if !isCSRApproved(csr) { + return nil + } + + if len(csr.Status.Certificate) > 0 { + return nil + } + + // Do not sigh apiserver cert + if csr.Spec.SignerName == certificatesv1.KubeAPIServerClientSignerName { + return nil + } + + addonName := csr.Labels[addonapiv1alpha1.AddonLabelKey] + agentAddon, ok := c.agentAddons[addonName] + if !ok { + return nil + } + + registrationOption := agentAddon.GetAgentAddonOptions().Registration + if registrationOption == nil { + return nil + } + clusterName, ok := csr.Labels[clusterv1.ClusterNameLabelKey] + if !ok { + return nil + } + + // Get ManagedCluster + _, err = c.managedClusterLister.Get(clusterName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + _, err = c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + if registrationOption.CSRSign == nil { + return nil + } + + csr.Status.Certificate = registrationOption.CSRSign(csr) + if len(csr.Status.Certificate) == 0 { + return fmt.Errorf("invalid client certificate generated for addon csr %q", csr.Name) + } + + _, err = c.kubeClient.CertificatesV1().CertificateSigningRequests().UpdateStatus(ctx, csr, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} diff --git 
a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go new file mode 100644 index 000000000..d508372ca --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go @@ -0,0 +1,273 @@ +package managementaddonconfig + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/dynamic/dynamiclister" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +const ( + controllerName = "management-addon-config-controller" +) + +type enqueueFunc func(obj interface{}) + +// clusterManagementAddonConfigController reconciles all interested addon config types (GroupVersionResource) on the hub. +type clusterManagementAddonConfigController struct { + addonClient addonv1alpha1client.Interface + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + clusterManagementAddonIndexer cache.Indexer + configListers map[schema.GroupResource]dynamiclister.Lister + queue workqueue.RateLimitingInterface + addonFilterFunc factory.EventFilterFunc + configGVRs map[schema.GroupVersionResource]bool +} + +func NewManagementAddonConfigController( + addonClient addonv1alpha1client.Interface, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + configInformerFactory dynamicinformer.DynamicSharedInformerFactory, + configGVRs map[schema.GroupVersionResource]bool, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + syncCtx := factory.NewSyncContext(controllerName) + + c := &clusterManagementAddonConfigController{ + addonClient: addonClient, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + clusterManagementAddonIndexer: clusterManagementAddonInformers.Informer().GetIndexer(), + configListers: map[schema.GroupResource]dynamiclister.Lister{}, + queue: syncCtx.Queue(), + addonFilterFunc: addonFilterFunc, + configGVRs: configGVRs, + } + + configInformers := c.buildConfigInformers(configInformerFactory, configGVRs) + + return factory.New(). + WithSyncContext(syncCtx). + WithInformersQueueKeysFunc(func(obj runtime.Object) []string { + key, _ := cache.MetaNamespaceKeyFunc(obj) + return []string{key} + }, clusterManagementAddonInformers.Informer()). + WithBareInformers(configInformers...). 
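+ // Each configured GVR gets its own dynamic informer (built in buildConfigInformers below); they are attached as bare informers so that a change to any referenced config object requeues the owning ClusterManagementAddOns via enqueueClusterManagementAddOnsByConfig.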
+ WithSync(c.sync).ToController(controllerName) +} + +func (c *clusterManagementAddonConfigController) buildConfigInformers( + configInformerFactory dynamicinformer.DynamicSharedInformerFactory, + configGVRs map[schema.GroupVersionResource]bool, +) []factory.Informer { + configInformers := []factory.Informer{} + for gvrRaw := range configGVRs { + gvr := gvrRaw // copy the value since it will be used in the closure + indexInformer := configInformerFactory.ForResource(gvr).Informer() + _, err := indexInformer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueClusterManagementAddOnsByConfig(gvr), + UpdateFunc: func(oldObj, newObj interface{}) { + c.enqueueClusterManagementAddOnsByConfig(gvr)(newObj) + }, + DeleteFunc: c.enqueueClusterManagementAddOnsByConfig(gvr), + }, + ) + if err != nil { + utilruntime.HandleError(err) + } + configInformers = append(configInformers, indexInformer) + c.configListers[schema.GroupResource{Group: gvr.Group, Resource: gvr.Resource}] = dynamiclister.New(indexInformer.GetIndexer(), gvr) + } + return configInformers +} + +func (c *clusterManagementAddonConfigController) enqueueClusterManagementAddOnsByConfig(gvr schema.GroupVersionResource) enqueueFunc { + return func(obj interface{}) { + namespaceName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get accessor of object: %v", obj)) + return + } + + objs, err := c.clusterManagementAddonIndexer.ByIndex( + index.ClusterManagementAddonByConfig, fmt.Sprintf("%s/%s/%s", gvr.Group, gvr.Resource, namespaceName)) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get addons: %v", err)) + return + } + + for _, obj := range objs { + if obj == nil { + continue + } + key, _ := cache.MetaNamespaceKeyFunc(obj) + c.queue.Add(key) + } + } +} + +func (c *clusterManagementAddonConfigController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + _, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + cma, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + // addon could be deleted, ignore + return nil + } + if err != nil { + return err + } + + if !c.addonFilterFunc(cma) { + return nil + } + + cmaCopy := cma.DeepCopy() + if err := c.updateConfigSpecHash(cmaCopy); err != nil { + return err + } + + return c.patchConfigReferences(ctx, cma, cmaCopy) +} + +func (c *clusterManagementAddonConfigController) updateConfigSpecHash(cma *addonapiv1alpha1.ClusterManagementAddOn) error { + + for i, defaultConfigReference := range cma.Status.DefaultConfigReferences { + if !utils.ContainGR( + c.configGVRs, + defaultConfigReference.ConfigGroupResource.Group, + defaultConfigReference.ConfigGroupResource.Resource) { + continue + } + + if defaultConfigReference.DesiredConfig == nil || defaultConfigReference.DesiredConfig.Name == "" { + continue + } + + specHash, err := c.getConfigSpecHash(defaultConfigReference.ConfigGroupResource, defaultConfigReference.DesiredConfig.ConfigReferent) + if err != nil { + return nil + } + cma.Status.DefaultConfigReferences[i].DesiredConfig.SpecHash = specHash + } + + for i, installProgression := range cma.Status.InstallProgressions { + for j, configReference := range installProgression.ConfigReferences { + if configReference.DesiredConfig == nil || configReference.DesiredConfig.Name == "" { + continue + } + + if !utils.ContainGR( + c.configGVRs,
configReference.ConfigGroupResource.Group, + configReference.ConfigGroupResource.Resource) { + continue + } + + specHash, err := c.getConfigSpecHash(configReference.ConfigGroupResource, configReference.DesiredConfig.ConfigReferent) + if err != nil { + return nil + } + cma.Status.InstallProgressions[i].ConfigReferences[j].DesiredConfig.SpecHash = specHash + } + } + + return nil +} + +func (c *clusterManagementAddonConfigController) patchConfigReferences(ctx context.Context, old, new *addonapiv1alpha1.ClusterManagementAddOn) error { + if equality.Semantic.DeepEqual(new.Status.DefaultConfigReferences, old.Status.DefaultConfigReferences) && + equality.Semantic.DeepEqual(new.Status.InstallProgressions, old.Status.InstallProgressions) { + return nil + } + + oldData, err := json.Marshal(&addonapiv1alpha1.ClusterManagementAddOn{ + Status: addonapiv1alpha1.ClusterManagementAddOnStatus{ + DefaultConfigReferences: old.Status.DefaultConfigReferences, + InstallProgressions: old.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonapiv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonapiv1alpha1.ClusterManagementAddOnStatus{ + DefaultConfigReferences: new.Status.DefaultConfigReferences, + InstallProgressions: new.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(4).Infof("Patching addon %s/%s config reference with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = c.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( + ctx, + new.Name, + types.MergePatchType, + patchBytes, + metav1.PatchOptions{}, + "status", + ) + return err +} + +func (c *clusterManagementAddonConfigController) getConfigSpecHash(gr addonapiv1alpha1.ConfigGroupResource, + cr addonapiv1alpha1.ConfigReferent) (string, error) { + lister, ok := c.configListers[schema.GroupResource{Group: gr.Group, Resource: gr.Resource}] + if !ok { + return "", nil + } + + var config *unstructured.Unstructured + var err error + if cr.Namespace == "" { + config, err = lister.Get(cr.Name) + } else { + config, err = lister.Namespace(cr.Namespace).Get(cr.Name) + } + if errors.IsNotFound(err) { + return "", nil + } + if err != nil { + return "", err + } + + return utils.GetSpecHash(config) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go new file mode 100644 index 000000000..ac0f07dde --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go @@ -0,0 +1,214 @@ +package registration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 
"open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +// addonRegistrationController reconciles instances of ManagedClusterAddon on the hub. +type addonRegistrationController struct { + addonClient addonv1alpha1client.Interface + managedClusterLister clusterlister.ManagedClusterLister + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + agentAddons map[string]agent.AgentAddon +} + +func NewAddonRegistrationController( + addonClient addonv1alpha1client.Interface, + clusterInformers clusterinformers.ManagedClusterInformer, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + agentAddons map[string]agent.AgentAddon, +) factory.Controller { + c := &addonRegistrationController{ + addonClient: addonClient, + managedClusterLister: clusterInformers.Lister(), + managedClusterAddonLister: addonInformers.Lister(), + agentAddons: agentAddons, + } + + return factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if _, ok := c.agentAddons[accessor.GetName()]; !ok { + return false + } + + return true + }, + addonInformers.Informer()). + WithSync(c.sync).ToController("addon-registration-controller") +} + +func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + klog.V(4).Infof("Reconciling addon registration %q", key) + + clusterName, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is not in format: namespace/name + return nil + } + + agentAddon, ok := c.agentAddons[addonName] + if !ok { + return nil + } + + // Get ManagedCluster + managedCluster, err := c.managedClusterLister.Get(clusterName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + managedClusterAddon, err := c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + managedClusterAddonCopy := managedClusterAddon.DeepCopy() + + // wait until the mca's ownerref is set. 
+ if !utils.IsOwnedByCMA(managedClusterAddonCopy) { + return nil + } + + var supportedConfigs []addonapiv1alpha1.ConfigGroupResource + for _, config := range agentAddon.GetAgentAddonOptions().SupportedConfigGVRs { + supportedConfigs = append(supportedConfigs, addonapiv1alpha1.ConfigGroupResource{ + Group: config.Group, + Resource: config.Resource, + }) + } + managedClusterAddonCopy.Status.SupportedConfigs = supportedConfigs + + registrationOption := agentAddon.GetAgentAddonOptions().Registration + if registrationOption == nil { + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.RegistrationAppliedNilRegistration, + Message: "Registration of the addon agent is configured", + }) + return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) + } + + if registrationOption.PermissionConfig != nil { + err = registrationOption.PermissionConfig(managedCluster, managedClusterAddonCopy) + if err != nil { + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.RegistrationAppliedSetPermissionFailed, + Message: fmt.Sprintf("Failed to set permission for hub agent: %v", err), + }) + if patchErr := c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon); patchErr != nil { + return patchErr + } + return err + } + } + + if registrationOption.CSRConfigurations == nil { + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.RegistrationAppliedNilRegistration, + Message: "Registration of the addon agent is configured", + }) + return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) + } + configs := registrationOption.CSRConfigurations(managedCluster) + + managedClusterAddonCopy.Status.Registrations = configs + + managedClusterAddonCopy.Status.Namespace = registrationOption.Namespace + if len(managedClusterAddonCopy.Spec.InstallNamespace) > 0 { + managedClusterAddonCopy.Status.Namespace = managedClusterAddonCopy.Spec.InstallNamespace + } + + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.RegistrationAppliedSetPermissionApplied, + Message: "Registration of the addon agent is configured", + }) + + return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) +} + +func (c *addonRegistrationController) patchAddonStatus(ctx context.Context, new, old *addonapiv1alpha1.ManagedClusterAddOn) error { + if equality.Semantic.DeepEqual(new.Status.Registrations, old.Status.Registrations) && + equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) && + equality.Semantic.DeepEqual(new.Status.SupportedConfigs, old.Status.SupportedConfigs) && + new.Status.Namespace == old.Status.Namespace { + return nil + } + + oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + Registrations: old.Status.Registrations, + Namespace: old.Status.Namespace, + SupportedConfigs: old.Status.SupportedConfigs, + Conditions: old.Status.Conditions, + }, + }) + if err != nil { 
+ return err + } + + newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + Registrations: new.Status.Registrations, + Namespace: new.Status.Namespace, + SupportedConfigs: new.Status.SupportedConfigs, + Conditions: new.Status.Conditions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go new file mode 100644 index 000000000..e12b7ee70 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go @@ -0,0 +1,365 @@ +package addonmanager + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" + "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration" + "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +// AddonManager is the interface to initialize a manager on hub to manage the addon +// agents on all managedcluster +type AddonManager interface { + // AddAgent register an addon agent to the manager. + AddAgent(addon agent.AgentAddon) error + + // Trigger triggers a reconcile loop in the manager. Currently it + // only trigger the deploy controller. 
+ Trigger(clusterName, addonName string) + + // Start starts all registered addon agent. + Start(ctx context.Context) error + + // StartWithInformers starts all registered addon agent with the given informers. + StartWithInformers(ctx context.Context, + kubeInformers kubeinformers.SharedInformerFactory, + workInformers workv1informers.SharedInformerFactory, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error +} + +type addonManager struct { + addonAgents map[string]agent.AgentAddon + addonConfigs map[schema.GroupVersionResource]bool + config *rest.Config + syncContexts []factory.SyncContext +} + +func (a *addonManager) AddAgent(addon agent.AgentAddon) error { + addonOption := addon.GetAgentAddonOptions() + if len(addonOption.AddonName) == 0 { + return fmt.Errorf("addon name should be set") + } + if _, ok := a.addonAgents[addonOption.AddonName]; ok { + return fmt.Errorf("an agent is added for the addon already") + } + a.addonAgents[addonOption.AddonName] = addon + return nil +} + +func (a *addonManager) Trigger(clusterName, addonName string) { + for _, syncContex := range a.syncContexts { + syncContex.Queue().Add(fmt.Sprintf("%s/%s", clusterName, addonName)) + } +} + +func (a *addonManager) Start(ctx context.Context) error { + kubeClient, err := kubernetes.NewForConfig(a.config) + if err != nil { + return err + } + + workClient, err := workv1client.NewForConfig(a.config) + if err != nil { + return err + } + + dynamicClient, err := dynamic.NewForConfig(a.config) + if err != nil { + return err + } + + addonClient, err := addonv1alpha1client.NewForConfig(a.config) + if err != nil { + return err + } + + clusterClient, err := clusterv1client.NewForConfig(a.config) + if err != nil { + return err + } + + addonInformers := addoninformers.NewSharedInformerFactory(addonClient, 10*time.Minute) + clusterInformers := clusterv1informers.NewSharedInformerFactory(clusterClient, 10*time.Minute) + dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 10*time.Minute) + + var addonNames []string + for key := range a.addonAgents { + addonNames = append(addonNames, key) + } + kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, + kubeinformers.WithTweakListOptions(func(listOptions *metav1.ListOptions) { + selector := &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: addonv1alpha1.AddonLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: addonNames, + }, + }, + } + listOptions.LabelSelector = metav1.FormatLabelSelector(selector) + }), + ) + + workInformers := workv1informers.NewSharedInformerFactoryWithOptions(workClient, 10*time.Minute, + workv1informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) { + selector := &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: addonv1alpha1.AddonLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: addonNames, + }, + }, + } + listOptions.LabelSelector = metav1.FormatLabelSelector(selector) + }), + ) + + // addonDeployController + err = workInformers.Work().V1().ManifestWorks().Informer().AddIndexers( + cache.Indexers{ + index.ManifestWorkByAddon: index.IndexManifestWorkByAddon, + index.ManifestWorkByHostedAddon: index.IndexManifestWorkByHostedAddon, + index.ManifestWorkHookByHostedAddon: index.IndexManifestWorkHookByHostedAddon, + }, + ) + if err != nil { + return err + } + 
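+	// Each index registered above maps a ManifestWork to the addon keys it can
+	// be looked up by. For reference, a cache.IndexFunc has the following shape;
+	// the body is a hypothetical sketch, not the vendored index implementation:
+	//
+	//	func indexWorkByAddon(obj interface{}) ([]string, error) {
+	//		accessor, err := meta.Accessor(obj)
+	//		if err != nil {
+	//			return nil, err
+	//		}
+	//		if addonName, ok := accessor.GetLabels()[addonv1alpha1.AddonLabelKey]; ok {
+	//			return []string{fmt.Sprintf("%s/%s", accessor.GetNamespace(), addonName)}, nil
+	//		}
+	//		return nil, nil
+	//	}
+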
+ // addonConfigController + err = addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{index.AddonByConfig: index.IndexAddonByConfig}, + ) + if err != nil { + return err + } + + // managementAddonConfigController + err = addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().AddIndexers( + cache.Indexers{index.ClusterManagementAddonByConfig: index.IndexClusterManagementAddonByConfig}) + if err != nil { + return err + } + + err = addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ClusterManagementAddonByPlacement: index.IndexClusterManagementAddonByPlacement, + }) + if err != nil { + return err + } + err = addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ManagedClusterAddonByName: index.IndexManagedClusterAddonByName, + }) + if err != nil { + return err + } + + err = a.StartWithInformers(ctx, kubeInformers, workInformers, addonInformers, clusterInformers, dynamicInformers) + if err != nil { + return err + } + + kubeInformers.Start(ctx.Done()) + workInformers.Start(ctx.Done()) + addonInformers.Start(ctx.Done()) + clusterInformers.Start(ctx.Done()) + dynamicInformers.Start(ctx.Done()) + return nil +} + +func (a *addonManager) StartWithInformers(ctx context.Context, + kubeInformers kubeinformers.SharedInformerFactory, + workInformers workv1informers.SharedInformerFactory, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { + + kubeClient, err := kubernetes.NewForConfig(a.config) + if err != nil { + return err + } + + addonClient, err := addonv1alpha1client.NewForConfig(a.config) + if err != nil { + return err + } + + workClient, err := workv1client.NewForConfig(a.config) + if err != nil { + return err + } + + v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient) + if err != nil { + return err + } + + for _, agentImpl := range a.addonAgents { + for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs { + a.addonConfigs[configGVR] = true + } + } + + deployController := agentdeploy.NewAddonDeployController( + workClient, + addonClient, + clusterInformers.Cluster().V1().ManagedClusters(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + workInformers.Work().V1().ManifestWorks(), + a.addonAgents, + ) + + registrationController := registration.NewAddonRegistrationController( + addonClient, + clusterInformers.Cluster().V1().ManagedClusters(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + + addonInstallController := addoninstall.NewAddonInstallController( + addonClient, + clusterInformers.Cluster().V1().ManagedClusters(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + + // This is a duplicate controller in general addon-manager. 
This should be removed when we
+	// always enable the addon-manager
+	addonOwnerController := addonowner.NewAddonOwnerController(
+		addonClient,
+		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+		addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+		utils.ManagedBySelf(a.addonAgents),
+	)
+
+	var addonConfigController, managementAddonConfigController, addonConfigurationController factory.Controller
+	if len(a.addonConfigs) != 0 {
+		addonConfigController = addonconfig.NewAddonConfigController(
+			addonClient,
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+			dynamicInformers,
+			a.addonConfigs,
+			utils.FilterByAddonName(a.addonAgents),
+		)
+		managementAddonConfigController = managementaddonconfig.NewManagementAddonConfigController(
+			addonClient,
+			addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+			dynamicInformers,
+			a.addonConfigs,
+			utils.FilterByAddonName(a.addonAgents),
+		)
+
+		// Start the addonConfiguration controller. Note this is to handle the case when the general
+		// addon-manager is not started; we should consider removing this when the general addon-manager
+		// is always started.
+		// This controller will also ignore the installStrategy part.
+		addonConfigurationController = addonconfiguration.NewAddonConfigurationController(
+			addonClient,
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+			nil, nil,
+			utils.ManagedBySelf(a.addonAgents),
+		)
+	}
+
+	var csrApproveController factory.Controller
+	var csrSignController factory.Controller
+	// Spawn the following controllers only if the v1 CSR api is supported in the
+	// hub cluster. Under the v1beta1 CSR api, all the CSR objects will be signed
+	// by the kube-controller-manager, so the custom CSR controllers should be
+	// disabled to avoid conflict.
+	if v1CSRSupported {
+		csrApproveController = certificate.NewCSRApprovingController(
+			kubeClient,
+			clusterInformers.Cluster().V1().ManagedClusters(),
+			kubeInformers.Certificates().V1().CertificateSigningRequests(),
+			nil,
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			a.addonAgents,
+		)
+		csrSignController = certificate.NewCSRSignController(
+			kubeClient,
+			clusterInformers.Cluster().V1().ManagedClusters(),
+			kubeInformers.Certificates().V1().CertificateSigningRequests(),
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			a.addonAgents,
+		)
+	} else if v1beta1Supported {
+		csrApproveController = certificate.NewCSRApprovingController(
+			kubeClient,
+			clusterInformers.Cluster().V1().ManagedClusters(),
+			nil,
+			kubeInformers.Certificates().V1beta1().CertificateSigningRequests(),
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			a.addonAgents,
+		)
+	}
+
+	a.syncContexts = append(a.syncContexts, deployController.SyncContext())
+
+	go deployController.Run(ctx, 1)
+	go registrationController.Run(ctx, 1)
+	go addonInstallController.Run(ctx, 1)
+
+	go addonOwnerController.Run(ctx, 1)
+	if addonConfigController != nil {
+		go addonConfigController.Run(ctx, 1)
+	}
+	if managementAddonConfigController != nil {
+		go managementAddonConfigController.Run(ctx, 1)
+	}
+	if addonConfigurationController != nil {
+		go addonConfigurationController.Run(ctx, 1)
+	}
+	if csrApproveController != nil {
+		go csrApproveController.Run(ctx, 1)
+	}
+	if csrSignController != nil {
+		go csrSignController.Run(ctx, 1)
+	}
+	return nil
+}
+
+// New returns a new Manager for creating addon agents.
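+//
+// A minimal usage sketch, assuming a hypothetical exampleAgent type that
+// implements agent.AgentAddon (all names here are illustrative only):
+//
+//	mgr, err := addonmanager.New(restConfig)
+//	if err != nil {
+//		return err
+//	}
+//	if err := mgr.AddAgent(&exampleAgent{}); err != nil {
+//		return err
+//	}
+//	return mgr.Start(ctx)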
+func New(config *rest.Config) (AddonManager, error) {
+	return &addonManager{
+		config:       config,
+		syncContexts: []factory.SyncContext{},
+		addonConfigs: map[schema.GroupVersionResource]bool{},
+		addonAgents:  map[string]agent.AgentAddon{},
+	}, nil
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go b/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go
new file mode 100644
index 000000000..3138067a4
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go
@@ -0,0 +1,276 @@
+package agent
+
+import (
+	"fmt"
+
+	certificatesv1 "k8s.io/api/certificates/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+	workapiv1 "open-cluster-management.io/api/work/v1"
+)
+
+// AgentAddon is a mandatory interface for implementing a custom addon.
+// The addon is expected to be registered into an AddonManager so the manager will invoke the addon
+// implementation below as callbacks upon:
+//   - receiving valid watch events from ManagedClusterAddon.
+//   - receiving valid watch events from ManifestWork.
+type AgentAddon interface {
+
+	// Manifests returns a list of manifest resources to be deployed on the managed cluster for this addon.
+	// The resources in this list are required to explicitly set their TypeMeta (i.e. apiVersion, kind),
+	// otherwise deploying the addon will fail constantly. A recommended set of addon components returned
+	// here is:
+	//   - the hosting namespace of the addon agents.
+	//   - a deployment of the addon agents.
+	//   - the configurations (e.g. configmaps) mounted by the deployment.
+	//   - the RBAC permissions bound to the addon agents *in the managed cluster*. (the hub cluster's RBAC
+	//     setup shall be done at GetAgentAddonOptions below.)
+	// NB for dispatching namespaced resources, it's recommended to include the namespace in the list.
+	Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error)
+
+	// GetAgentAddonOptions returns the agent options for advanced agent customization.
+	// A minimal option is merely setting a unique addon name in the AgentAddonOptions.
+	GetAgentAddonOptions() AgentAddonOptions
+}
+
+// AgentAddonOptions prescribes the future customization for the addon.
+type AgentAddonOptions struct {
+	// AddonName is the name of the addon.
+	// Should be globally unique.
+	// +required
+	AddonName string
+
+	// Registration prescribes the custom behavior during CSR applying, approval and signing.
+	// +optional
+	Registration *RegistrationOption
+
+	// InstallStrategy defines in which clusters the addon should be installed.
+	// If InstallStrategy is nil, the addon will not be installed automatically until a ManagedClusterAddon
+	// is applied to the cluster's namespace.
+	// +optional
+	InstallStrategy *InstallStrategy
+
+	// Updaters select a set of resources and define the strategies to update them.
+	// UpdateStrategy is Update if no Updater is defined for a resource.
+	// +optional
+	Updaters []Updater
+
+	// HealthProber defines how the healthiness status of the ManagedClusterAddon is probed.
+	// Note that the prescribed prober type here only applies to the automatically installed
+	// addons configured via InstallStrategy.
+	// If nil, it defaults to the "Lease" type.
+	// +optional
+	HealthProber *HealthProber
+
+	// HostedModeEnabled defines whether the hosted deploying mode for the addon agent is enabled.
+	// If not set, it defaults to false.
+	// +optional
+	HostedModeEnabled bool
+
+	// SupportedConfigGVRs is a list of configuration GroupVersionResources supported by the addon;
+	// each configuration GroupVersionResource should be unique.
+	SupportedConfigGVRs []schema.GroupVersionResource
+}
+
+type CSRSignerFunc func(csr *certificatesv1.CertificateSigningRequest) []byte
+
+type CSRApproveFunc func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, csr *certificatesv1.CertificateSigningRequest) bool
+
+type PermissionConfigFunc func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error
+
+// RegistrationOption defines how the agent is registered to the hub cluster. It needs to define:
+// 1. with what subject/signer the CSR should be created
+// 2. how the CSR is approved
+// 3. the RBAC setting of the agent on the hub
+// 4. how the CSR is signed if a customized signer is used.
+type RegistrationOption struct {
+	// CSRConfigurations returns a list of CSR configurations for the addon agent in a managed cluster.
+	// A CSR will be created from the managed cluster for the addon agent with each CSRConfiguration.
+	// +required
+	CSRConfigurations func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig
+
+	// Namespace is the namespace where the registration credential will be put on the managed cluster. It
+	// will be overridden by installNamespace on the ManagedClusterAddon spec if set.
+	Namespace string
+
+	// CSRApproveCheck checks whether the addon agent registration should be approved by the hub.
+	// An addon hub controller can implement this func to auto-approve all the CSRs. A better CSR check is
+	// recommended to include (1) the validity of the requester's requesting identity and (2) the other request
+	// payload such as key-usages.
+	// If the function is not set, the registration and certificate renewal of the addon agent needs to be
+	// approved manually on the hub.
+	// NB auto-approving CSRs requires the addon manager to have sufficient RBAC permission for the target
+	// signer, e.g.:
+	// >> { "apiGroups":["certificates.k8s.io"],
+	// >>   "resources":["signers"],
+	// >>   "resourceNames":["kubernetes.io/kube-apiserver-client"]...}
+	// +optional
+	CSRApproveCheck CSRApproveFunc
+
+	// PermissionConfig defines the function for an addon to set up RBAC permissions. This callback doesn't
+	// couple with any concrete RBAC API, so the implementation is expected to ensure the RBAC in the hub
+	// cluster by calling the kubernetes API explicitly. Additionally we can also extend arbitrary third-party
+	// permission setup in this callback.
+	// +optional
+	PermissionConfig PermissionConfigFunc
+
+	// CSRSign signs a CSR and returns a certificate. It is used when the addon has its own customized signer.
+	// The returned byte array shall be a valid non-nil PEM encoded x509 certificate.
+	// +optional
+	CSRSign CSRSignerFunc
+}
+
+// InstallStrategy is the installation strategy of the manifests prescribed by Manifests(..).
+type InstallStrategy struct {
+	*installStrategy
+}
+
+type installStrategy struct {
+	// InstallNamespace is the target deploying namespace in the managed cluster upon automatic addon installation.
+	InstallNamespace string
+
+	// managedClusterFilter filters the clusters the addon is installed to.
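+	// For example, a filter that selects only clusters carrying a hypothetical
+	// "env=prod" label would be:
+	//
+	//	func(cluster *clusterv1.ManagedCluster) bool {
+	//		return cluster.Labels["env"] == "prod"
+	//	}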
+	managedClusterFilter func(cluster *clusterv1.ManagedCluster) bool
+}
+
+func (s *InstallStrategy) GetManagedClusterFilter() func(cluster *clusterv1.ManagedCluster) bool {
+	return s.managedClusterFilter
+}
+
+type Updater struct {
+	// ResourceIdentifier sets what resources the strategy applies to.
+	ResourceIdentifier workapiv1.ResourceIdentifier
+
+	// UpdateStrategy defines the strategy used to update the manifests.
+	UpdateStrategy workapiv1.UpdateStrategy
+}
+
+type HealthProber struct {
+	Type HealthProberType
+
+	WorkProber *WorkHealthProber
+}
+
+type AddonHealthCheckFunc func(workapiv1.ResourceIdentifier, workapiv1.StatusFeedbackResult) error
+
+type WorkHealthProber struct {
+	// ProbeFields tells the addon framework which fields to probe.
+	ProbeFields []ProbeField
+
+	// HealthCheck checks the status of the addon based on the probe result.
+	HealthCheck AddonHealthCheckFunc
+}
+
+// ProbeField defines the field of a resource to be probed.
+type ProbeField struct {
+	// ResourceIdentifier sets what resource should be probed.
+	ResourceIdentifier workapiv1.ResourceIdentifier
+
+	// ProbeRules sets the rules to probe the field.
+	ProbeRules []workapiv1.FeedbackRule
+}
+
+type HealthProberType string
+
+const (
+	// HealthProberTypeNone indicates the healthiness status will not be refreshed, which
+	// leaves the healthiness of the ManagedClusterAddon an empty string.
+	HealthProberTypeNone HealthProberType = "None"
+	// HealthProberTypeLease indicates the healthiness of the addon is connected with the
+	// corresponding lease resource in the cluster namespace with the same name as the addon.
+	// Note that the lease object is expected to be periodically refreshed by a local agent
+	// deployed in the managed cluster implementing the lease.LeaseUpdater interface.
+	HealthProberTypeLease HealthProberType = "Lease"
+	// HealthProberTypeWork indicates the healthiness of the addon is equal to the overall
+	// dispatching status of the corresponding ManifestWork resource.
+	// It's applicable to addons that don't have a local agent instance in the managed
+	// clusters. The addon framework will check if the work is Available on the spoke. In addition,
+	// users can define a prober to check more detailed status based on status feedback from the work.
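+	// A sketch of such a prober, assuming the addon agent ships a hypothetical
+	// "hello-agent" Deployment and using the well-known status feedback rule
+	// (illustrative only, not the framework's default):
+	//
+	//	&HealthProber{
+	//		Type: HealthProberTypeWork,
+	//		WorkProber: &WorkHealthProber{
+	//			ProbeFields: []ProbeField{{
+	//				ResourceIdentifier: workapiv1.ResourceIdentifier{
+	//					Group: "apps", Resource: "deployments",
+	//					Namespace: "default", Name: "hello-agent",
+	//				},
+	//				ProbeRules: []workapiv1.FeedbackRule{{Type: workapiv1.WellKnownStatusType}},
+	//			}},
+	//			HealthCheck: func(id workapiv1.ResourceIdentifier, result workapiv1.StatusFeedbackResult) error {
+	//				for _, value := range result.Values {
+	//					if value.Name == "ReadyReplicas" && value.Value.Integer != nil && *value.Value.Integer >= 1 {
+	//						return nil
+	//					}
+	//				}
+	//				return fmt.Errorf("agent deployment is not ready")
+	//			},
+	//		},
+	//	}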
+	HealthProberTypeWork HealthProberType = "Work"
+)
+
+func KubeClientSignerConfigurations(addonName, agentName string) func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig {
+	return func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig {
+		return []addonapiv1alpha1.RegistrationConfig{
+			{
+				SignerName: certificatesv1.KubeAPIServerClientSignerName,
+				Subject: addonapiv1alpha1.Subject{
+					User:   DefaultUser(cluster.Name, addonName, agentName),
+					Groups: DefaultGroups(cluster.Name, addonName),
+				},
+			},
+		}
+	}
}
+
+// DefaultUser returns the default user.
+func DefaultUser(clusterName, addonName, agentName string) string {
+	return fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s:agent:%s", clusterName, addonName, agentName)
+}
+
+// DefaultGroups returns the default groups.
+func DefaultGroups(clusterName, addonName string) []string {
+	return []string{
+		fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s", clusterName, addonName),
+		fmt.Sprintf("system:open-cluster-management:addon:%s", addonName),
+		"system:authenticated",
+	}
+}
+
+// InstallAllStrategy indicates that the addon should be installed to all clusters.
+func InstallAllStrategy(installNamespace string) *InstallStrategy {
+	return &InstallStrategy{
+		&installStrategy{
+			InstallNamespace: installNamespace,
+			managedClusterFilter: func(cluster *clusterv1.ManagedCluster) bool {
+				return true
+			},
+		},
+	}
+}
+
+// InstallByLabelStrategy indicates that the addon should be installed based on the clusters' labels.
+func InstallByLabelStrategy(installNamespace string, selector metav1.LabelSelector) *InstallStrategy {
+	return &InstallStrategy{
+		&installStrategy{
+			InstallNamespace: installNamespace,
+			managedClusterFilter: func(cluster *clusterv1.ManagedCluster) bool {
+				selector, err := metav1.LabelSelectorAsSelector(&selector)
+				if err != nil {
+					klog.Warningf("label selector is invalid: %v", err)
+					return false
+				}
+
+				if !selector.Matches(labels.Set(cluster.Labels)) {
+					return false
+				}
+				return true
+			},
+		},
+	}
+}
+
+// InstallByFilterFunctionStrategy indicates that the addon should be installed based on a filter function;
+// the addon is installed to all clusters if the filter function is nil.
+func InstallByFilterFunctionStrategy(installNamespace string, f func(cluster *clusterv1.ManagedCluster) bool) *InstallStrategy {
+	if f == nil {
+		f = func(cluster *clusterv1.ManagedCluster) bool {
+			return true
+		}
+	}
+	return &InstallStrategy{
+		&installStrategy{
+			InstallNamespace:     installNamespace,
+			managedClusterFilter: f,
+		},
+	}
+}
+
+// ApprovalAllCSRs returns true for all CSRs.
+func ApprovalAllCSRs(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, csr *certificatesv1.CertificateSigningRequest) bool {
+	return true
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go b/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go
new file mode 100644
index 000000000..008fc0e07
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go
@@ -0,0 +1,149 @@
+package assets
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/errors"
+)
+
+type Permission os.FileMode
+
+const (
+	PermissionDirectoryDefault Permission = 0755
+	PermissionFileDefault      Permission = 0644
+	PermissionFileRestricted   Permission = 0600
+)
+
+// Asset defines a single static asset.
+type Asset struct {
+	Name           string
+	FilePermission Permission
+	Data           []byte
+}
+
+// Assets is a list of assets.
+type Assets []Asset
+
+// New walks through a directory recursively and renders each file as an asset. Only files
+// for which all predicates return true are rendered.
+func New(dir string, data interface{}, predicates ...FileInfoPredicate) (Assets, error) {
+	files, err := LoadFilesRecursively(dir, predicates...)
+	if err != nil {
+		return nil, err
+	}
+
+	var as Assets
+	var errs []error
+	for path, bs := range files {
+		a, err := assetFromTemplate(path, bs, data)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to render %q: %v", path, err))
+			continue
+		}
+
+		as = append(as, *a)
+	}
+
+	if len(errs) > 0 {
+		return nil, errors.NewAggregate(errs)
+	}
+
+	return as, nil
+}
+
+// WriteFiles writes the assets to the specified path.
+func (as Assets) WriteFiles(path string) error {
+	if err := os.MkdirAll(path, os.FileMode(PermissionDirectoryDefault)); err != nil {
+		return err
+	}
+	for _, asset := range as {
+		// Warn when the target file (not the parent directory) already exists;
+		// os.Stat returns a nil error for an existing file.
+		if _, err := os.Stat(filepath.Join(path, asset.Name)); err == nil {
+			fmt.Printf("WARNING: File %s already exists, content will be replaced\n", filepath.Join(path, asset.Name))
+		}
+		if err := asset.WriteFile(path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WriteFile writes a single asset into the specified path.
+func (a Asset) WriteFile(path string) error {
+	f := filepath.Join(path, a.Name)
+	perms := PermissionFileDefault
+	if err := os.MkdirAll(filepath.Dir(f), os.FileMode(PermissionDirectoryDefault)); err != nil {
+		return err
+	}
+	if a.FilePermission != 0 {
+		perms = a.FilePermission
+	}
+	fmt.Printf("Writing asset: %s\n", f)
+	return os.WriteFile(f, a.Data, os.FileMode(perms))
+}
+
+// MustCreateAssetFromTemplate processes the given template with the given config and returns an asset.
+// It panics on error.
+func MustCreateAssetFromTemplate(name string, template []byte, config interface{}) Asset {
+	asset, err := assetFromTemplate(name, template, config)
+	if err != nil {
+		panic(err)
+	}
+	return *asset
+}
+
+func assetFromTemplate(name string, tb []byte, data interface{}) (*Asset, error) {
+	bs, err := renderFile(name, tb, data)
+	if err != nil {
+		return nil, err
+	}
+	return &Asset{Name: name, Data: bs}, nil
+}
+
+type FileInfoPredicate func(os.FileInfo) bool
+
+// OnlyYaml is a predicate for LoadFilesRecursively that filters out non-YAML files.
+func OnlyYaml(info os.FileInfo) bool {
+	return strings.HasSuffix(info.Name(), ".yaml") || strings.HasSuffix(info.Name(), ".yml")
+}
+
+// LoadFilesRecursively returns a map from relative path names to file content.
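+// For example, to load only the YAML manifests under a hypothetical
+// "manifests/chart" directory:
+//
+//	files, err := LoadFilesRecursively("manifests/chart", OnlyYaml)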
+func LoadFilesRecursively(dir string, predicates ...FileInfoPredicate) (map[string][]byte, error) { + files := map[string][]byte{} + err := filepath.Walk(dir, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + for _, p := range predicates { + if !p(info) { + return nil + } + } + + bs, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return err + } + + // make path relative to dir + rel, err := filepath.Rel(dir, path) + if err != nil { + return err + } + + files[rel] = bs + return nil + }, + ) + if err != nil { + return nil, err + } + + return files, nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/assets/template.go b/vendor/open-cluster-management.io/addon-framework/pkg/assets/template.go new file mode 100644 index 000000000..785439220 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/assets/template.go @@ -0,0 +1,78 @@ +package assets + +import ( + "bytes" + "encoding/base64" + "strings" + "text/template" + "time" + + "k8s.io/client-go/util/cert" +) + +var templateFuncs = map[string]interface{}{ + "notAfter": notAfter, + "notBefore": notBefore, + "issuer": issuer, + "base64": base64encode, + "indent": indent, + "load": load, +} + +func indent(indention int, v []byte) string { + newline := "\n" + strings.Repeat(" ", indention) + return strings.Replace(string(v), "\n", newline, -1) +} + +func base64encode(v []byte) string { + return base64.StdEncoding.EncodeToString(v) +} + +func notAfter(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].NotAfter.Format(time.RFC3339) +} + +func notBefore(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].NotBefore.Format(time.RFC3339) +} + +func issuer(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].Issuer.CommonName +} + +func load(n string, assets map[string][]byte) []byte { + return assets[n] +} + +func renderFile(name string, tb []byte, data interface{}) ([]byte, error) { + tmpl, err := template.New(name).Funcs(templateFuncs).Parse(string(tb)) + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/events/recorder.go b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/events/recorder.go new file mode 100644 index 000000000..7c67b03a8 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/events/recorder.go @@ -0,0 +1,130 @@ +package events + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/klog/v2" +) + +// Recorder is a simple event recording interface. +type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows to fiddle the component name before sending the event to sink. 
+	// Making the component names more unique will prevent the spam filter in the upstream event sink
+	// from dropping events.
+	ForComponent(componentName string) Recorder
+
+	// WithComponentSuffix is similar to ForComponent except it just suffixes the current component name instead of overriding it.
+	WithComponentSuffix(componentNameSuffix string) Recorder
+
+	// WithContext allows setting a context for event create API calls.
+	WithContext(ctx context.Context) Recorder
+
+	// ComponentName returns the current source component name for the event.
+	// This allows suffixing the original component name with 'sub-component'.
+	ComponentName() string
+
+	Shutdown()
+}
+
+// NewRecorder returns a new event recorder.
+func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return &recorder{
+		eventClient:       client,
+		involvedObjectRef: involvedObjectRef,
+		sourceComponent:   sourceComponentName,
+	}
+}
+
+// recorder is an implementation of the Recorder interface.
+type recorder struct {
+	eventClient       corev1client.EventInterface
+	involvedObjectRef *corev1.ObjectReference
+	sourceComponent   string
+
+	// TODO: This is not the right way to pass the context, but there is no other way without breaking the event interface
+	ctx context.Context
+}
+
+func (r *recorder) ComponentName() string {
+	return r.sourceComponent
+}
+
+func (r *recorder) Shutdown() {}
+
+func (r *recorder) ForComponent(componentName string) Recorder {
+	newRecorderForComponent := *r
+	newRecorderForComponent.sourceComponent = componentName
+	return &newRecorderForComponent
+}
+
+func (r *recorder) WithContext(ctx context.Context) Recorder {
+	r.ctx = ctx
+	return r
+}
+
+func (r *recorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Eventf emits a normal type event and allows formatting of the message.
+func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits a warning type event and allows formatting of the message.
+func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Event emits the normal type event.
+func (r *recorder) Event(reason, message string) {
+	event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message)
+	ctx := context.Background()
+	if r.ctx != nil {
+		ctx = r.ctx
+	}
+	if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil {
+		klog.Warningf("Error creating event %+v: %v", event, err)
+	}
+}
+
+// Warning emits the warning type event.
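+// A typical call, with a hypothetical reason and message, looks like:
+//
+//	recorder.Warning("SyncFailed", "failed to apply addon manifests")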
+func (r *recorder) Warning(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + ctx := context.Background() + if r.ctx != nil { + ctx = r.ctx + } + if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + Namespace: involvedObjRef.Namespace, + }, + InvolvedObject: *involvedObjRef, + Reason: reason, + Message: message, + Type: eventType, + Count: 1, + FirstTimestamp: currentTime, + LastTimestamp: currentTime, + } + event.Source.Component = sourceComponent + return event +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/base_controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/base_controller.go new file mode 100644 index 000000000..5ff93519b --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/base_controller.go @@ -0,0 +1,168 @@ +package factory + +import ( + "context" + "fmt" + "sync" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +var defaultCacheSyncTimeout = 10 * time.Minute + +// baseController represents generic Kubernetes controller boiler-plate +type baseController struct { + name string + cachesToSync []cache.InformerSynced + sync func(ctx context.Context, controllerContext SyncContext, key string) error + syncContext SyncContext + resyncEvery time.Duration + cacheSyncTimeout time.Duration +} + +var _ Controller = &baseController{} + +func (c baseController) Name() string { + return c.name +} + +func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { + return fmt.Errorf("unable to sync caches for %s", controllerName) + } + + klog.Infof("Caches are synced for %s ", controllerName) + + return nil +} + +func (c *baseController) SyncContext() SyncContext { + return c.syncContext +} + +func (c *baseController) Run(ctx context.Context, workers int) { + // give caches 10 minutes to sync + cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout) + defer cacheSyncCancel() + err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...) + if err != nil { + select { + case <-ctx.Done(): + // Exit gracefully because the controller was requested to stop. + return + default: + // If caches did not sync after 10 minutes, it has taken oddly long and + // we should provide feedback. Since the control loops will never start, + // it is safer to exit with a good message than to continue with a dead loop. + // TODO: Consider making this behavior configurable. 
+			klog.Exit(err)
+		}
+	}
+
+	var workerWg sync.WaitGroup
+	defer func() {
+		defer klog.Infof("All %s workers have been terminated", c.name)
+		workerWg.Wait()
+	}()
+
+	// queueContext is used to track and initiate queue shutdown
+	queueContext, queueContextCancel := context.WithCancel(context.TODO())
+
+	for i := 1; i <= workers; i++ {
+		klog.Infof("Starting #%d worker of %s controller ...", i, c.name)
+		workerWg.Add(1)
+		go func() {
+			defer func() {
+				klog.Infof("Shutting down worker of %s controller ...", c.name)
+				workerWg.Done()
+			}()
+			c.runWorker(queueContext)
+		}()
+	}
+
+	// runPeriodicalResync is independent from the queue
+	if c.resyncEvery > 0 {
+		workerWg.Add(1)
+		go func() {
+			defer workerWg.Done()
+			c.runPeriodicalResync(ctx, c.resyncEvery)
+		}()
+	}
+
+	// Handle controller shutdown
+
+	<-ctx.Done()                     // wait for controller context to be cancelled
+	c.syncContext.Queue().ShutDown() // shutdown the controller queue first
+	queueContextCancel()             // cancel the queue context, which tells workers to initiate shutdown
+
+	// Wait for all workers to finish their job.
+	// At this point Run() can hang and callers have to implement the logic that will kill
+	// this controller (SIGKILL).
+	klog.Infof("Shutting down %s ...", c.name)
+}
+
+func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext, key string) error {
+	return c.sync(ctx, syncCtx, key)
+}
+
+func (c *baseController) runPeriodicalResync(ctx context.Context, interval time.Duration) {
+	if interval == 0 {
+		return
+	}
+	go wait.UntilWithContext(ctx, func(ctx context.Context) {
+		c.syncContext.Queue().Add(DefaultQueueKey)
+	}, interval)
+}
+
+// runWorker runs a single worker.
+// The worker is asked to terminate when the passed context is cancelled.
+func (c *baseController) runWorker(queueCtx context.Context) {
+	wait.UntilWithContext(
+		queueCtx,
+		func(queueCtx context.Context) {
+			for {
+				select {
+				case <-queueCtx.Done():
+					return
+				default:
+					c.processNextWorkItem(queueCtx)
+				}
+			}
+		},
+		1*time.Second)
+}
+
+func (c *baseController) processNextWorkItem(queueCtx context.Context) {
+	key, quit := c.syncContext.Queue().Get()
+	if quit {
+		return
+	}
+	defer c.syncContext.Queue().Done(key)
+
+	syncCtx := c.syncContext.(syncContext)
+	queueKey, ok := key.(string)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key))
+		return
+	}
+
+	if err := c.sync(queueCtx, syncCtx, queueKey); err != nil {
+		if klog.V(4).Enabled() || queueKey != DefaultQueueKey {
+			utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
+		} else {
+			utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
+		}
+		c.syncContext.Queue().AddRateLimited(key)
+		return
+	}
+
+	c.syncContext.Queue().Forget(key)
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/controller_context.go b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/controller_context.go
new file mode 100644
index 000000000..74b329fb7
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/controller_context.go
@@ -0,0 +1,77 @@
+package factory
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+)
+
+// syncContext implements SyncContext and provides user access to the queue and the object that caused
+// the sync to be triggered.
+type syncContext struct {
+	queue workqueue.RateLimitingInterface
+}
+
+var _ SyncContext = syncContext{}
+
+// NewSyncContext gives a new sync context.
+func NewSyncContext(name string) SyncContext {
+	return syncContext{
+		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
+	}
+}
+
+func (c syncContext) Queue() workqueue.RateLimitingInterface {
+	return c.queue
+}
+
+// eventHandler provides the default event handler that is added to the informers passed to the controller factory.
+func (c syncContext) eventHandler(queueKeysFunc ObjectQueueKeysFunc, filter EventFilterFunc) cache.ResourceEventHandler {
+	resourceEventHandler := cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			runtimeObj, ok := obj.(runtime.Object)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj))
+				return
+			}
+			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+		},
+		UpdateFunc: func(old, new interface{}) {
+			runtimeObj, ok := new.(runtime.Object)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", new))
+				return
+			}
+			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+		},
+		DeleteFunc: func(obj interface{}) {
+			runtimeObj, ok := obj.(runtime.Object)
+			if !ok {
+				if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+					c.enqueueKeys(queueKeysFunc(tombstone.Obj.(runtime.Object))...)
+					return
+				}
+				utilruntime.HandleError(fmt.Errorf("deleted object %+v is not runtime Object", obj))
+				return
+			}
+			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+		},
+	}
+	if filter == nil {
+		return resourceEventHandler
+	}
+	return cache.FilteringResourceEventHandler{
+		FilterFunc: filter,
+		Handler:    resourceEventHandler,
+	}
+}
+
+func (c syncContext) enqueueKeys(keys ...string) {
+	for _, qKey := range keys {
+		c.queue.Add(qKey)
+	}
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/factory.go b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/factory.go
new file mode 100644
index 000000000..5a3184760
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/factory.go
@@ -0,0 +1,195 @@
+package factory
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/cache"
+)
+
+// DefaultQueueKey is the queue key used for string trigger based controllers.
+const DefaultQueueKey = "key"
+
+// DefaultQueueKeysFunc returns a slice with a single element - the DefaultQueueKey
+func DefaultQueueKeysFunc(_ runtime.Object) []string {
+	return []string{DefaultQueueKey}
+}
+
+// Factory is a generator that generates standard Kubernetes controllers.
+// Factory is really generic and should only be used for simple controllers that do not require special handling.
+type Factory struct {
+	sync              SyncFunc
+	syncContext       SyncContext
+	resyncInterval    time.Duration
+	informers         []filteredInformers
+	informerQueueKeys []informersWithQueueKey
+	bareInformers     []Informer
+	cachesToSync      []cache.InformerSynced
+}
+
+// Informer represents any structure that allows registering event handlers and reports whether its cache is synced.
+// Any SharedInformer will comply.
+type Informer interface {
+	AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
+	HasSynced() bool
+}
+
+type informersWithQueueKey struct {
+	informers  []Informer
+	filter     EventFilterFunc
+	queueKeyFn ObjectQueueKeysFunc
+}
+
+type filteredInformers struct {
+	informers []Informer
+	filter    EventFilterFunc
+}
+
+// ObjectQueueKeysFunc is used to make string work queue keys out of the runtime object that is passed to it.
+// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller
+// that only uses string triggers.
+type ObjectQueueKeysFunc func(runtime.Object) []string
+
+// EventFilterFunc is used to filter informer events to prevent Sync() from being called
+type EventFilterFunc func(obj interface{}) bool
+
+// New returns a new factory instance.
+func New() *Factory {
+	return &Factory{}
+}
+
+// WithSync is used to set the controller synchronization function. This function is the core of the controller and
+// usually holds the main controller logic.
+func (f *Factory) WithSync(syncFn SyncFunc) *Factory {
+	f.sync = syncFn
+	return f
+}
+
+// WithInformers is used to register event handlers and collect the caches-synced functions.
+// Pass the informers you want to use to react to changes on resources. If an informer event is observed, the Sync()
+// function is called.
+func (f *Factory) WithInformers(informers ...Informer) *Factory {
+	f.WithFilteredEventsInformers(nil, informers...)
+	return f
+}
+
+// WithFilteredEventsInformers is used to register event handlers and collect the caches-synced functions.
+// Pass the informers you want to use to react to changes on resources. If an informer event is observed, the Sync()
+// function is called.
+// Pass a filter to filter out events that should not trigger a Sync() call.
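+// For example, a filter reacting only to objects labeled for a hypothetical
+// "helloworld" addon (meta and addonv1alpha1 are assumed to be imported by
+// the caller):
+//
+//	factory.New().WithFilteredEventsInformers(func(obj interface{}) bool {
+//		accessor, err := meta.Accessor(obj)
+//		if err != nil {
+//			return false
+//		}
+//		return accessor.GetLabels()[addonv1alpha1.AddonLabelKey] == "helloworld"
+//	}, informer)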
+func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory { + f.informers = append(f.informers, filteredInformers{ + informers: informers, + filter: filter, + }) + return f +} + +// WithBareInformers allow to register informer that already has custom event handlers registered and no additional +// event handlers will be added to this informer. +// The controller will wait for the cache of this informer to be synced. +// The existing event handlers will have to respect the queue key function or the sync() implementation will have to +// count with custom queue keys. +func (f *Factory) WithBareInformers(informers ...Informer) *Factory { + f.bareInformers = append(f.bareInformers, informers...) + return f +} + +// WithInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +func (f *Factory) WithInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithFilteredEventsInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: queueKeyFn, + }) + return f +} + +// ResyncEvery will cause the Sync() function to be called periodically, regardless of informers. +// This is useful when you want to refresh every N minutes or you fear that your informers can be stucked. +// If this is not called, no periodical resync will happen. +// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself. +// +// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects. +func (f *Factory) ResyncEvery(interval time.Duration) *Factory { + f.resyncInterval = interval + return f +} + +// WithSyncContext allows to specify custom, existing sync context for this factory. +// This is useful during unit testing where you can override the default event recorder or mock the runtime objects. +// If this function not called, a SyncContext is created by the factory automatically. +func (f *Factory) WithSyncContext(ctx SyncContext) *Factory { + f.syncContext = ctx + return f +} + +// ToController produce a runnable controller. 
+func (f *Factory) ToController(name string) Controller { + if f.sync == nil { + panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name)) + } + + var ctx SyncContext + if f.syncContext != nil { + ctx = f.syncContext + } else { + ctx = NewSyncContext(name) + } + + c := &baseController{ + name: name, + sync: f.sync, + resyncEvery: f.resyncInterval, + cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), + syncContext: ctx, + cacheSyncTimeout: defaultCacheSyncTimeout, + } + + for i := range f.informerQueueKeys { + for d := range f.informerQueueKeys[i].informers { + informer := f.informerQueueKeys[i].informers[d] + queueKeyFn := f.informerQueueKeys[i].queueKeyFn + _, err := informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + if err != nil { + utilruntime.HandleError(err) + } + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.informers { + for d := range f.informers[i].informers { + informer := f.informers[i].informers[d] + _, err := informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter)) + if err != nil { + utilruntime.HandleError(err) + } + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for _, bareInformer := range f.bareInformers { + c.cachesToSync = append(c.cachesToSync, bareInformer.HasSynced) + } + + return c +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/interfaces.go b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/interfaces.go new file mode 100644 index 000000000..2bc98b8f0 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/basecontroller/factory/interfaces.go @@ -0,0 +1,42 @@ +package factory + +import ( + "context" + + "k8s.io/client-go/util/workqueue" +) + +// Controller interface represents a runnable Kubernetes controller. +// Cancelling the syncContext passed will cause the controller to shutdown. +// Number of workers determine how much parallel the job processing should be. +type Controller interface { + // Run runs the controller and blocks until the controller is finished. + // Number of workers can be specified via workers parameter. + // This function will return when all internal loops are finished. + // Note that having more than one worker usually means handing parallelization of Sync(). + Run(ctx context.Context, workers int) + + // Sync contain the main controller logic. + // This should not be called directly, but can be used in unit tests to exercise the sync. + Sync(ctx context.Context, controllerContext SyncContext, key string) error + + // Name returns the controller name string. + Name() string + + // SyncContext returns the SyncContext of this controller + SyncContext() SyncContext +} + +// SyncContext interface represents a context given to the Sync() function where the main controller logic happen. +// SyncContext exposes controller name and give user access to the queue (for manual requeue). +// SyncContext also provides metadata about object that informers observed as changed. +type SyncContext interface { + // Queue gives access to controller queue. This can be used for manual requeue, although if a Sync() function return + // an error, the object is automatically re-queued. Use with caution. + Queue() workqueue.RateLimitingInterface +} + +// SyncFunc is a function that contain main controller logic. 
+// The syncContext.syncContext passed is the main controller syncContext, when cancelled it means the controller is being shut down. +// The syncContext provides access to controller name, queue and event recorder. +type SyncFunc func(ctx context.Context, controllerContext SyncContext, key string) error diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/index/index.go b/vendor/open-cluster-management.io/addon-framework/pkg/index/index.go new file mode 100644 index 000000000..bc6b3443e --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/index/index.go @@ -0,0 +1,252 @@ +package index + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" +) + +const ( + ClusterManagementAddonByPlacement = "clusterManagementAddonByPlacement" + ManagedClusterAddonByName = "managedClusterAddonByName" +) + +func IndexClusterManagementAddonByPlacement(obj interface{}) ([]string, error) { + cma, ok := obj.(*addonv1alpha1.ClusterManagementAddOn) + + if !ok { + return []string{}, fmt.Errorf("obj %T is not a ClusterManagementAddon", obj) + } + + var keys []string + if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual { + return keys, nil + } + + for _, placement := range cma.Spec.InstallStrategy.Placements { + key := fmt.Sprintf("%s/%s", placement.PlacementRef.Namespace, placement.PlacementRef.Name) + keys = append(keys, key) + } + + return keys, nil +} + +func IndexManagedClusterAddonByName(obj interface{}) ([]string, error) { + mca, ok := obj.(*addonv1alpha1.ManagedClusterAddOn) + + if !ok { + return []string{}, fmt.Errorf("obj %T is not a ManagedClusterAddon", obj) + } + + return []string{mca.Name}, nil +} + +func ClusterManagementAddonByPlacementQueueKey( + cmai addoninformerv1alpha1.ClusterManagementAddOnInformer) func(obj runtime.Object) []string { + return func(obj runtime.Object) []string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return []string{} + } + + objs, err := cmai.Informer().GetIndexer().ByIndex(ClusterManagementAddonByPlacement, key) + if err != nil { + utilruntime.HandleError(err) + return []string{} + } + + var keys []string + for _, o := range objs { + cma := o.(*addonv1alpha1.ClusterManagementAddOn) + klog.V(4).Infof("enqueue ClusterManagementAddon %s, because of placement %s", cma.Name, key) + keys = append(keys, cma.Name) + } + + return keys + } +} + +func ClusterManagementAddonByPlacementDecisionQueueKey( + cmai addoninformerv1alpha1.ClusterManagementAddOnInformer) func(obj runtime.Object) []string { + return func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + placementName, ok := accessor.GetLabels()[clusterv1beta1.PlacementLabel] + if !ok { + return []string{} + } + + objs, err := cmai.Informer().GetIndexer().ByIndex(ClusterManagementAddonByPlacement, + fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName)) + if err != nil { + 
utilruntime.HandleError(err) + return []string{} + } + + var keys []string + for _, o := range objs { + cma := o.(*addonv1alpha1.ClusterManagementAddOn) + klog.V(4).Infof("enqueue ClusterManagementAddon %s, because of placementDecision %s/%s", + cma.Name, accessor.GetNamespace(), accessor.GetName()) + keys = append(keys, cma.Name) + } + + return keys + } +} + +const ( + ManifestWorkByAddon = "manifestWorkByAddon" + ManifestWorkByHostedAddon = "manifestWorkByHostedAddon" + ManifestWorkHookByHostedAddon = "manifestWorkHookByHostedAddon" +) + +func IndexManifestWorkByAddon(obj interface{}) ([]string, error) { + work, ok := obj.(*workapiv1.ManifestWork) + if !ok { + return []string{}, fmt.Errorf("obj is supposed to be a ManifestWork, but is %T", obj) + } + + addonName, addonNamespace, isHook := extractAddonFromWork(work) + + if len(addonName) == 0 || len(addonNamespace) > 0 || isHook { + return []string{}, nil + } + + return []string{fmt.Sprintf("%s/%s", work.Namespace, addonName)}, nil +} + +func IndexManifestWorkByHostedAddon(obj interface{}) ([]string, error) { + work, ok := obj.(*workapiv1.ManifestWork) + if !ok { + return []string{}, fmt.Errorf("obj is supposed to be a ManifestWork, but is %T", obj) + } + + addonName, addonNamespace, isHook := extractAddonFromWork(work) + + if len(addonName) == 0 || len(addonNamespace) == 0 || isHook { + return []string{}, nil + } + + return []string{fmt.Sprintf("%s/%s", addonNamespace, addonName)}, nil +} + +func IndexManifestWorkHookByHostedAddon(obj interface{}) ([]string, error) { + work, ok := obj.(*workapiv1.ManifestWork) + if !ok { + return []string{}, fmt.Errorf("obj is supposed to be a ManifestWork, but is %T", obj) + } + + addonName, addonNamespace, isHook := extractAddonFromWork(work) + + if len(addonName) == 0 || len(addonNamespace) == 0 || !isHook { + return []string{}, nil + } + + return []string{fmt.Sprintf("%s/%s", addonNamespace, addonName)}, nil +} + +func extractAddonFromWork(work *workapiv1.ManifestWork) (string, string, bool) { + if len(work.Labels) == 0 { + return "", "", false + } + + addonName, ok := work.Labels[addonv1alpha1.AddonLabelKey] + if !ok { + return "", "", false + } + + addonNamespace := work.Labels[addonv1alpha1.AddonNamespaceLabelKey] + + isHook := false + if strings.HasPrefix(work.Name, constants.PreDeleteHookWorkName(addonName)) { + isHook = true + } + + return addonName, addonNamespace, isHook +} + +const ( + AddonByConfig = "addonByConfig" +) + +func IndexAddonByConfig(obj interface{}) ([]string, error) { + addon, ok := obj.(*addonv1alpha1.ManagedClusterAddOn) + if !ok { + return nil, fmt.Errorf("obj is supposed to be a ManagedClusterAddOn, but is %T", obj) + } + + getIndex := func(config addonv1alpha1.ConfigReference) string { + if config.Namespace != "" { + return fmt.Sprintf("%s/%s/%s/%s", config.Group, config.Resource, config.Namespace, config.Name) + } + + return fmt.Sprintf("%s/%s/%s", config.Group, config.Resource, config.Name) + } + + configNames := []string{} + for _, configReference := range addon.Status.ConfigReferences { + if configReference.Name == "" { + // bad config reference, ignore + continue + } + + configNames = append(configNames, getIndex(configReference)) + } + + return configNames, nil +} + +const ( + ClusterManagementAddonByConfig = "clusterManagementAddonByConfig" +) + +func IndexClusterManagementAddonByConfig(obj interface{}) ([]string, error) { + cma, ok := obj.(*addonv1alpha1.ClusterManagementAddOn) + if !ok { + return nil, fmt.Errorf("obj is supposed to be a ClusterManagementAddOn, 
but is %T", obj) + } + + getIndex := func(gr addonv1alpha1.ConfigGroupResource, configSpecHash addonv1alpha1.ConfigSpecHash) string { + if configSpecHash.Namespace != "" { + return fmt.Sprintf("%s/%s/%s/%s", gr.Group, gr.Resource, configSpecHash.Namespace, configSpecHash.Name) + } + + return fmt.Sprintf("%s/%s/%s", gr.Group, gr.Resource, configSpecHash.Name) + } + + configNames := sets.New[string]() + for _, defaultConfigRef := range cma.Status.DefaultConfigReferences { + if defaultConfigRef.DesiredConfig == nil || defaultConfigRef.DesiredConfig.Name == "" { + // bad config reference, ignore + continue + } + + configNames.Insert(getIndex(defaultConfigRef.ConfigGroupResource, *defaultConfigRef.DesiredConfig)) + } + + for _, installProgression := range cma.Status.InstallProgressions { + for _, configReference := range installProgression.ConfigReferences { + if configReference.DesiredConfig == nil || configReference.DesiredConfig.Name == "" { + // bad config reference, ignore + continue + } + + configNames.Insert(getIndex(configReference.ConfigGroupResource, *configReference.DesiredConfig)) + } + } + + return configNames.UnsortedList(), nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go new file mode 100644 index 000000000..d4285fb58 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go @@ -0,0 +1,114 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type managedClusterAddonConfigurationReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d *managedClusterAddonConfigurationReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + + for _, addon := range graph.addonToUpdate() { + mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs) + err := d.patchAddonStatus(ctx, mca, addon.mca) + if err != nil { + errs = append(errs, err) + } + } + + return cma, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *managedClusterAddonConfigurationReconciler) mergeAddonConfig( + mca *addonv1alpha1.ManagedClusterAddOn, desiredConfigMap addonConfigMap) *addonv1alpha1.ManagedClusterAddOn { + mcaCopy := mca.DeepCopy() + + var mergedConfigs []addonv1alpha1.ConfigReference + // remove configs that are not desired + for _, config := range mcaCopy.Status.ConfigReferences { + if _, ok := desiredConfigMap[config.ConfigGroupResource]; ok { + mergedConfigs = append(mergedConfigs, config) + } + } + + // append or update configs + for _, config := range desiredConfigMap { + var match bool + for i := range mergedConfigs { + if mergedConfigs[i].ConfigGroupResource != config.ConfigGroupResource { + continue + } + + match = true + // set LastObservedGeneration to 0 when config name/namespace changes + if 
mergedConfigs[i].DesiredConfig != nil && (mergedConfigs[i].DesiredConfig.ConfigReferent != config.DesiredConfig.ConfigReferent) { + mergedConfigs[i].LastObservedGeneration = 0 + } + mergedConfigs[i].ConfigReferent = config.ConfigReferent + mergedConfigs[i].DesiredConfig = config.DesiredConfig.DeepCopy() + } + + if !match { + mergedConfigs = append(mergedConfigs, config) + } + } + + mcaCopy.Status.ConfigReferences = mergedConfigs + return mcaCopy +} + +func (d *managedClusterAddonConfigurationReconciler) patchAddonStatus(ctx context.Context, new, old *addonv1alpha1.ManagedClusterAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + oldData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{ + Status: addonv1alpha1.ManagedClusterAddOnStatus{ + Namespace: old.Status.Namespace, + ConfigReferences: old.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonv1alpha1.ManagedClusterAddOnStatus{ + Namespace: new.Status.Namespace, + ConfigReferences: new.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go new file mode 100644 index 000000000..32311511e --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go @@ -0,0 +1,209 @@ +package addonconfiguration + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" +) + +// addonConfigurationController is a controller to update configuration of mca with the following order +// 1. use configuration in mca spec if it is set +// 2. use configuration in install strategy +// 3. 
use configuration in the default configuration in cma +type addonConfigurationController struct { + addonClient addonv1alpha1client.Interface + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + managedClusterAddonIndexer cache.Indexer + addonFilterFunc factory.EventFilterFunc + placementLister clusterlisterv1beta1.PlacementLister + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister + + reconcilers []addonConfigurationReconcile +} + +type addonConfigurationReconcile interface { + reconcile(ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, + graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) +} + +type reconcileState int64 + +const ( + reconcileStop reconcileState = iota + reconcileContinue +) + +func NewAddonConfigurationController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + placementInformer clusterinformersv1beta1.PlacementInformer, + placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + c := &addonConfigurationController{ + addonClient: addonClient, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + managedClusterAddonIndexer: addonInformers.Informer().GetIndexer(), + addonFilterFunc: addonFilterFunc, + } + + c.reconcilers = []addonConfigurationReconcile{ + &managedClusterAddonConfigurationReconciler{ + addonClient: addonClient, + }, + &clusterManagementAddonProgressingReconciler{ + addonClient: addonClient, + }, + } + + controllerFactory := factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, + clusterManagementAddonInformers.Informer()).WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()) + + // This is to handle the case the self managed addon-manager does not have placementInformer/placementDecisionInformer. + // we will not consider installStrategy related placement for self managed addon-manager. + if placementInformer != nil && placementDecisionInformer != nil { + controllerFactory = controllerFactory.WithInformersQueueKeysFunc( + index.ClusterManagementAddonByPlacementDecisionQueueKey(clusterManagementAddonInformers), placementDecisionInformer.Informer()). 
+ WithInformersQueueKeysFunc(index.ClusterManagementAddonByPlacementQueueKey(clusterManagementAddonInformers), placementInformer.Informer()) + c.placementLister = placementInformer.Lister() + c.placementDecisionLister = placementDecisionInformer.Lister() + } + + return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller") +} + +func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + _, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + klog.V(4).Infof("Reconciling addon %q", addonName) + + cma, err := c.clusterManagementAddonLister.Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + if !c.addonFilterFunc(cma) { + return nil + } + + cma = cma.DeepCopy() + graph, err := c.buildConfigurationGraph(cma) + if err != nil { + return err + } + + var state reconcileState + var errs []error + for _, reconciler := range c.reconcilers { + cma, state, err = reconciler.reconcile(ctx, cma, graph) + if err != nil { + errs = append(errs, err) + } + if state == reconcileStop { + break + } + } + + return utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) buildConfigurationGraph(cma *addonv1alpha1.ClusterManagementAddOn) (*configurationGraph, error) { + graph := newGraph(cma.Spec.SupportedConfigs, cma.Status.DefaultConfigReferences) + addons, err := c.managedClusterAddonIndexer.ByIndex(index.ManagedClusterAddonByName, cma.Name) + if err != nil { + return graph, err + } + + // add all existing addons to the default at first + for _, addonObject := range addons { + addon := addonObject.(*addonv1alpha1.ManagedClusterAddOn) + graph.addAddonNode(addon) + } + + if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual { + return graph, nil + } + + // check each install strategy in status + var errs []error + for _, installProgression := range cma.Status.InstallProgressions { + clusters, err := c.getClustersByPlacement(installProgression.PlacementRef.Name, installProgression.PlacementRef.Namespace) + if errors.IsNotFound(err) { + klog.V(2).Infof("placement %s/%s is not found for addon %s", installProgression.PlacementRef.Namespace, installProgression.PlacementRef.Name, cma.Name) + continue + } + if err != nil { + errs = append(errs, err) + continue + } + + for _, installStrategy := range cma.Spec.InstallStrategy.Placements { + if installStrategy.PlacementRef == installProgression.PlacementRef { + graph.addPlacementNode(installStrategy, installProgression, clusters) + + } + } + } + + return graph, utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) getClustersByPlacement(name, namespace string) ([]string, error) { + var clusters []string + if c.placementLister == nil || c.placementDecisionLister == nil { + return clusters, nil + } + _, err := c.placementLister.Placements(namespace).Get(name) + if err != nil { + return clusters, err + } + + decisionSelector := labels.SelectorFromSet(labels.Set{ + clusterv1beta1.PlacementLabel: name, + }) + decisions, err := c.placementDecisionLister.PlacementDecisions(namespace).List(decisionSelector) + if err != nil { + return clusters, err + } + + for _, d := range decisions { + for _, sd := range d.Status.Decisions { + clusters = append(clusters, sd.ClusterName) + } + } + + return clusters, nil +} diff --git 
a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go new file mode 100644 index 000000000..ea766499b --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go @@ -0,0 +1,342 @@ +package addonconfiguration + +import ( + "fmt" + "math" + "sort" + "strconv" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +var ( + defaultMaxConcurrency = intstr.FromString("25%") + maxMaxConcurrency = intstr.FromString("100%") +) + +// configurationTree is a 2 level snapshot tree on the configuration of addons +// the first level is a list of nodes that represents a install strategy and a desired configuration for this install +// strategy. The second level is a list of nodes that represent each mca and its desired configuration +type configurationGraph struct { + // nodes maintains a list between a installStrategy and its related mcas + nodes []*installStrategyNode + // defaults is the nodes with no install strategy + defaults *installStrategyNode +} + +// installStrategyNode is a node in configurationGraph defined by a install strategy +type installStrategyNode struct { + placementRef addonv1alpha1.PlacementRef + maxConcurrency intstr.IntOrString + desiredConfigs addonConfigMap + // children keeps a map of addons node as the children of this node + children map[string]*addonNode + clusters sets.Set[string] +} + +// addonNode is node as a child of installStrategy node represting a mca +// addonnode +type addonNode struct { + desiredConfigs addonConfigMap + mca *addonv1alpha1.ManagedClusterAddOn + // record mca upgrade status + mcaUpgradeStatus upgradeStatus +} + +type upgradeStatus int + +const ( + // mca desired configs not synced from desiredConfigs yet + toupgrade upgradeStatus = iota + // mca desired configs upgraded and last applied configs not upgraded + upgrading + // both desired configs and last applied configs are upgraded + upgraded +) + +type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference + +// set addon upgrade status +func (n *addonNode) setUpgradeStatus() { + if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) { + n.mcaUpgradeStatus = toupgrade + return + } + + for _, actual := range n.mca.Status.ConfigReferences { + if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok { + if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) { + n.mcaUpgradeStatus = toupgrade + return + } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) { + n.mcaUpgradeStatus = upgrading + return + } + } else { + n.mcaUpgradeStatus = toupgrade + return + } + } + + n.mcaUpgradeStatus = upgraded +} + +func (d addonConfigMap) copy() addonConfigMap { + output := addonConfigMap{} + for k, v := range d { + output[k] = v + } + return output +} + +func newGraph(supportedConfigs []addonv1alpha1.ConfigMeta, defaultConfigReferences []addonv1alpha1.DefaultConfigReference) *configurationGraph { + graph := &configurationGraph{ + nodes: []*installStrategyNode{}, + defaults: &installStrategyNode{ + maxConcurrency: maxMaxConcurrency, + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{}, + children: map[string]*addonNode{}, + }, + } + + // 
init graph.defaults.desiredConfigs with supportedConfigs + for _, config := range supportedConfigs { + if config.DefaultConfig != nil { + graph.defaults.desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: *config.DefaultConfig, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: *config.DefaultConfig, + }, + } + } + } + // copy the spechash from cma status defaultConfigReferences + for _, configRef := range defaultConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + defaultsDesiredConfig, ok := graph.defaults.desiredConfigs[configRef.ConfigGroupResource] + if ok && (defaultsDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + defaultsDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + + return graph +} + +// addAddonNode to the graph, starting from placement with the highest order +func (g *configurationGraph) addAddonNode(mca *addonv1alpha1.ManagedClusterAddOn) { + for i := len(g.nodes) - 1; i >= 0; i-- { + if g.nodes[i].clusters.Has(mca.Namespace) { + g.nodes[i].addNode(mca) + return + } + } + + g.defaults.addNode(mca) +} + +// addNode delete clusters on existing graph so the new configuration overrides the previous +func (g *configurationGraph) addPlacementNode( + installStrategy addonv1alpha1.PlacementStrategy, + installProgression addonv1alpha1.InstallProgression, + clusters []string, +) { + placementRef := installProgression.PlacementRef + installConfigReference := installProgression.ConfigReferences + + node := &installStrategyNode{ + placementRef: placementRef, + maxConcurrency: maxMaxConcurrency, + desiredConfigs: g.defaults.desiredConfigs, + children: map[string]*addonNode{}, + clusters: sets.New[string](clusters...), + } + + // set max concurrency + if installStrategy.RolloutStrategy.Type == addonv1alpha1.AddonRolloutStrategyRollingUpdate { + if installStrategy.RolloutStrategy.RollingUpdate != nil { + node.maxConcurrency = installStrategy.RolloutStrategy.RollingUpdate.MaxConcurrency + } else { + node.maxConcurrency = defaultMaxConcurrency + } + } + + // overrides configuration by install strategy + if len(installConfigReference) > 0 { + node.desiredConfigs = node.desiredConfigs.copy() + for _, configRef := range installConfigReference { + if configRef.DesiredConfig == nil { + continue + } + node.desiredConfigs[configRef.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: configRef.ConfigGroupResource, + ConfigReferent: configRef.DesiredConfig.ConfigReferent, + DesiredConfig: configRef.DesiredConfig.DeepCopy(), + } + } + } + + // remove addon in defaults and other placements. 
+ for _, cluster := range clusters { + if _, ok := g.defaults.children[cluster]; ok { + node.addNode(g.defaults.children[cluster].mca) + delete(g.defaults.children, cluster) + } + for _, placement := range g.nodes { + if _, ok := placement.children[cluster]; ok { + node.addNode(placement.children[cluster].mca) + delete(placement.children, cluster) + } + } + } + g.nodes = append(g.nodes, node) +} + +func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]*installStrategyNode { + placementNodeMap := map[addonv1alpha1.PlacementRef]*installStrategyNode{} + for _, node := range g.nodes { + placementNodeMap[node.placementRef] = node + } + + return placementNodeMap +} + +func (g *configurationGraph) addonToUpdate() []*addonNode { + var addons []*addonNode + for _, node := range g.nodes { + addons = append(addons, node.addonToUpdate()...) + } + + addons = append(addons, g.defaults.addonToUpdate()...) + + return addons +} + +func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn) { + n.children[addon.Namespace] = &addonNode{ + mca: addon, + desiredConfigs: n.desiredConfigs, + } + + // override configuration by mca spec + if len(addon.Spec.Configs) > 0 { + n.children[addon.Namespace].desiredConfigs = n.children[addon.Namespace].desiredConfigs.copy() + // TODO we should also filter out the configs which are not supported configs. + for _, config := range addon.Spec.Configs { + n.children[addon.Namespace].desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: config.ConfigReferent, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: config.ConfigReferent, + }, + } + // copy the spechash from mca status + for _, configRef := range addon.Status.ConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + nodeDesiredConfig, ok := n.children[addon.Namespace].desiredConfigs[configRef.ConfigGroupResource] + if ok && (nodeDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + nodeDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + } + } + + // set addon node upgrade status + n.children[addon.Namespace].setUpgradeStatus() +} + +func (n *installStrategyNode) addonUpgraded() int { + count := 0 + for _, addon := range n.children { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgraded { + count += 1 + } + } + return count +} + +func (n *installStrategyNode) addonUpgrading() int { + count := 0 + for _, addon := range n.children { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgrading { + count += 1 + } + } + return count +} + +// addonToUpdate finds the addons to be updated by placement +func (n *installStrategyNode) addonToUpdate() []*addonNode { + var addons []*addonNode + + // sort the children by key + keys := make([]string, 0, len(n.children)) + for k := range n.children { + keys = append(keys, k) + } + sort.Strings(keys) + + total := len(n.clusters) + if total == 0 { + total = len(n.children) + } + + length, _ := parseMaxConcurrency(n.maxConcurrency, total) + if length == 0 { + return addons + } + + for i, k := range keys { + if (i%length == 0) && len(addons) > 0 { + return addons + } + + addon := n.children[k] + if addon.mcaUpgradeStatus != upgraded { + addons = append(addons, addon) + } + } + + return addons +} + +func parseMaxConcurrency(maxConcurrency intstr.IntOrString, total int) 
(int, error) { + var length int + + switch maxConcurrency.Type { + case intstr.String: + str := maxConcurrency.StrVal + f, err := strconv.ParseFloat(str[:len(str)-1], 64) + if err != nil { + return length, err + } + length = int(math.Ceil(f / 100 * float64(total))) + case intstr.Int: + length = maxConcurrency.IntValue() + default: + return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type) + } + + return length, nil +} + +func desiredConfigsEqual(a, b addonConfigMap) bool { + if len(a) != len(b) { + return false + } + + for configgrA := range a { + if a[configgrA] != b[configgrA] { + return false + } + } + + return true +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go new file mode 100644 index 000000000..4c44d700e --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go @@ -0,0 +1,139 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type clusterManagementAddonProgressingReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d *clusterManagementAddonProgressingReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + cmaCopy := cma.DeepCopy() + placementNodes := graph.getPlacementNodes() + + // go through addons and update condition per install progression + for i, installProgression := range cmaCopy.Status.InstallProgressions { + placementNode, exist := placementNodes[installProgression.PlacementRef] + if !exist { + continue + } + + isUpgrade := false + + for _, configReference := range installProgression.ConfigReferences { + if configReference.LastAppliedConfig != nil { + isUpgrade = true + break + } + } + + setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i], + isUpgrade, + placementNode.addonUpgrading(), + placementNode.addonUpgraded(), + len(placementNode.clusters), + ) + } + + err := d.patchMgmtAddonStatus(ctx, cmaCopy, cma) + if err != nil { + errs = append(errs, err) + } + return cmaCopy, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *clusterManagementAddonProgressingReconciler) patchMgmtAddonStatus(ctx context.Context, new, old *addonv1alpha1.ClusterManagementAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + oldData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: old.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: 
addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: new.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching clustermanagementaddon %s status with %s", new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} + +func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1.InstallProgression, isUpgrade bool, progressing, done, total int) { + // always update progressing condition when there is no config + // skip update progressing condition when last applied config already the same as desired + skip := len(installProgression.ConfigReferences) > 0 + for _, configReference := range installProgression.ConfigReferences { + if !equality.Semantic.DeepEqual(configReference.LastAppliedConfig, configReference.DesiredConfig) && + !equality.Semantic.DeepEqual(configReference.LastKnownGoodConfig, configReference.DesiredConfig) { + skip = false + } + } + if skip { + return + } + condition := metav1.Condition{ + Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, + } + if (total == 0 && done == 0) || (done != total) { + condition.Status = metav1.ConditionTrue + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgrading + condition.Message = fmt.Sprintf("%d/%d upgrading...", progressing+done, total) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstalling + condition.Message = fmt.Sprintf("%d/%d installing...", progressing+done, total) + } + } else { + for i, configRef := range installProgression.ConfigReferences { + installProgression.ConfigReferences[i].LastAppliedConfig = configRef.DesiredConfig.DeepCopy() + installProgression.ConfigReferences[i].LastKnownGoodConfig = configRef.DesiredConfig.DeepCopy() + } + condition.Status = metav1.ConditionFalse + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed + condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors.", done, total) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed + condition.Message = fmt.Sprintf("%d/%d install completed with no errors.", done, total) + } + } + meta.SetStatusCondition(&installProgression.Conditions, condition) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go new file mode 100644 index 000000000..beff5b4bd --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go @@ -0,0 +1,100 @@ +package addonowner + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + + 
"open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +const UnsupportedConfigurationType = "UnsupportedConfiguration" + +// addonOwnerController reconciles instances of managedclusteradd on the hub +// to add related ClusterManagementAddon as the owner. +type addonOwnerController struct { + addonClient addonv1alpha1client.Interface + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + addonFilterFunc factory.EventFilterFunc +} + +func NewAddonOwnerController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + c := &addonOwnerController{ + addonClient: addonClient, + managedClusterAddonLister: addonInformers.Lister(), + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + addonFilterFunc: addonFilterFunc, + } + + return factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, clusterManagementAddonInformers.Informer()). + WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()).WithSync(c.sync).ToController("addon-owner-controller") +} + +func (c *addonOwnerController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + klog.V(4).Infof("Reconciling addon %q", key) + + namespace, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(namespace).Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + addonCopy := addon.DeepCopy() + modified := false + + clusterManagementAddon, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + if !c.addonFilterFunc(clusterManagementAddon) { + return nil + } + + owner := metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) + modified = utils.MergeOwnerRefs(&addonCopy.OwnerReferences, *owner, false) + if modified { + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Update(ctx, addonCopy, metav1.UpdateOptions{}) + return err + } + + return nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go new file mode 100644 index 000000000..bbfba2ffe --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go @@ -0,0 +1,134 @@ +package utils + +import ( + "crypto/sha256" + "fmt" + "net/http" + "os" + "path/filepath" + "sync" + + "k8s.io/apiserver/pkg/server/healthz" +) + +var _ healthz.HealthChecker = &configChecker{} + +// configChecker is used to notify container to restart when config files updated +type configChecker struct { + name string + configfiles []string + checksum [32]byte + reload bool + sync.Mutex +} + +// NewConfigChecker +// +// 
Parameters: +// * name could be any string. +// * configfiles should be the same as your target container are using now. +// +// There is two use cases: +// Case1: Embeding configchecker into the current server +// +// In this case, we simply initialize a configchecker and add it to the current in used healthz.Checkers. +// You can check here for a reference: +// +// https://github.com/open-cluster-management/multicloud-operators-foundation/blob/56270b1520ec5896981db689b3afe0cd893cad8e/cmd/agent/agent.go#L148 +// +// ----------------------------------------------------------------------------- +// +// Case2: Using configchecker as an independent process to watch another service +// +// Example Code: +// config_checker_server.go +// +// type configCheckerServer struct { +// checkers []heathz.HealthChecker +// } +// +// func NewConfigCheckerServer(checkers []healthz.HealthChecker) *configCheckerServer { +// return &configCheckerServer{checkers: checkers} +// } +// +// func (s *configCheckerServer) ServerHttp(rw http.ResponseWriter, r *http.Request) { +// for _, c := range s.chekers { +// if c.Name() == r.URL { +// if err := c.Check(); err != nil { +// rw.WriteHeader(500) +// } else { +// rw.WriteHeader(200) +// } +// } +// } +// } +// +// main.go +// ... +// configchecker := utils.NewConfigChecker("checker", "/config/server-config.yaml") +// configchecker.SetReload(true) +// ccServer := NewConfigCheckerServer([]healthz.HealthChecker{configchecker}) +// ... +// +// There are some watch-outs for this case: +// 1. One configchecker server for one target server, don't use one configchecker for multiple server. +// 2. Set `reload` to `true` by invoke `SetReload` function. +// 3. In deployment's livessProbe config, the `failureThreshold` must be `1`. +func NewConfigChecker(name string, configfiles ...string) (*configChecker, error) { + checksum, err := load(configfiles) + if err != nil { + return nil, err + } + return &configChecker{ + name: name, + configfiles: configfiles, + checksum: checksum, + reload: false, + }, nil +} + +// SetReload can update the ‘reload’ fields of config checker +// If reload equals to false, config checker won't update the checksum value in the cache, and function Check would +// return error forever if config files are modified. but if reload equals to true, config checker only returns err +// once, and it updates the cache with the latest checksum of config files. +func (c *configChecker) SetReload(reload bool) { + c.reload = reload +} + +// Name return the name fo the configChecker +func (c *configChecker) Name() string { + return c.name +} + +// Check would return nil if current configfiles's checksum is equal to cached checksum +// If checksum not equal, it will return err and update cached checksum with current checksum +// Note that: configChecker performs a instant update after it returns err, so DO NOT use one +// configChecker for multible containers!!! 
+func (cc *configChecker) Check(_ *http.Request) error { + newChecksum, err := load(cc.configfiles) + if err != nil { + return err + } + if newChecksum != cc.checksum { + cc.Lock() + if cc.reload { + cc.checksum = newChecksum // update checksum + } + cc.Unlock() + return fmt.Errorf("checksum not equal") + } + return nil +} + +// load generates a checksum of all config files' content +func load(configfiles []string) ([32]byte, error) { + var allContent []byte + for _, c := range configfiles { + content, err := os.ReadFile(filepath.Clean(c)) + if err != nil { + return [32]byte{}, fmt.Errorf("read %s failed, %v", c, err) + } + allContent = append(allContent, content...) + } + return sha256.Sum256(allContent), nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go new file mode 100644 index 000000000..bc35dd12c --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go @@ -0,0 +1,219 @@ +package utils + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "math/big" + "strings" + "time" + + certificatesv1 "k8s.io/api/certificates/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/restmapper" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" +) + +var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) + +// DefaultSignerWithExpiry generates a signer func for addon agent to sign the csr using caKey and caData with expiry date. +func DefaultSignerWithExpiry(caKey, caData []byte, duration time.Duration) agent.CSRSignerFunc { + return func(csr *certificatesv1.CertificateSigningRequest) []byte { + blockTlsCrt, _ := pem.Decode(caData) + if blockTlsCrt == nil { + klog.Errorf("Failed to decode cert") + return nil + } + certs, err := x509.ParseCertificates(blockTlsCrt.Bytes) + if err != nil { + klog.Errorf("Failed to parse cert: %v", err) + return nil + } + + blockTlsKey, _ := pem.Decode(caKey) + if blockTlsKey == nil { + klog.Errorf("Failed to decode key") + return nil + } + + // For now only PKCS#1 is supported which assures the private key algorithm is RSA. + // TODO: Compatibility w/ PKCS#8 key e.g. 
EC algorithm + key, err := x509.ParsePKCS1PrivateKey(blockTlsKey.Bytes) + if err != nil { + klog.Errorf("Failed to parse key: %v", err) + return nil + } + + data, err := signCSR(csr, certs[0], key, duration) + if err != nil { + klog.Errorf("Failed to sign csr: %v", err) + return nil + } + return data + } +} + +func signCSR(csr *certificatesv1.CertificateSigningRequest, caCert *x509.Certificate, caKey *rsa.PrivateKey, duration time.Duration) ([]byte, error) { + certExpiryDuration := duration + durationUntilExpiry := time.Until(caCert.NotAfter) + if durationUntilExpiry <= 0 { + return nil, fmt.Errorf("signer has expired, expired time: %v", caCert.NotAfter) + } + if durationUntilExpiry < certExpiryDuration { + certExpiryDuration = durationUntilExpiry + } + + request, err := parseCSR(csr.Spec.Request) + if err != nil { + return nil, err + } + + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, fmt.Errorf("unable to generate a serial number for %s: %v", request.Subject.CommonName, err) + } + + tmpl := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: request.Subject, + DNSNames: request.DNSNames, + IPAddresses: request.IPAddresses, + EmailAddresses: request.EmailAddresses, + URIs: request.URIs, + PublicKeyAlgorithm: request.PublicKeyAlgorithm, + PublicKey: request.PublicKey, + Extensions: request.Extensions, + ExtraExtensions: request.ExtraExtensions, + // Hard code the usage since it cannot be specified in registration process + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + } + + now := time.Now() + tmpl.NotBefore = now + tmpl.NotAfter = now.Add(certExpiryDuration) + + der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, request.PublicKey, caKey) + if err != nil { + return nil, fmt.Errorf("failed to sign certificate: %v", err) + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: der, + }), nil +} + +func parseCSR(pemBytes []byte) (*x509.CertificateRequest, error) { + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, fmt.Errorf("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} + +// DefaultCSRApprover approve the csr when addon agent uses default group and default user to sign csr. +func DefaultCSRApprover(agentName string) agent.CSRApproveFunc { + return func( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn, + csr *certificatesv1.CertificateSigningRequest) bool { + defaultGroups := agent.DefaultGroups(cluster.Name, addon.Name) + + defaultUser := agent.DefaultUser(cluster.Name, addon.Name, agentName) + // check org field and commonName field + block, _ := pem.Decode(csr.Spec.Request) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + klog.Infof("CSR Approve Check Failed csr %q was not recognized: PEM block type is not CERTIFICATE REQUEST", csr.Name) + return false + } + + x509cr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + klog.Infof("CSR Approve Check Failed csr %q was not recognized: %v", csr.Name, err) + return false + } + + requestingOrgs := sets.NewString(x509cr.Subject.Organization...) 
+ if requestingOrgs.Len() != 3 { + klog.Infof("CSR Approve Check Failed csr %q org is not equal to 3", csr.Name) + return false + } + + for _, group := range defaultGroups { + if !requestingOrgs.Has(group) { + klog.Infof("CSR Approve Check Failed csr requesting orgs doesn't contain %s", group) + return false + } + } + + // check commonName field + if defaultUser != x509cr.Subject.CommonName { + klog.Infof("CSR Approve Check Failed commonName not right; request %s get %s", x509cr.Subject.CommonName, defaultUser) + return false + } + + // check user name + if strings.HasPrefix(csr.Spec.Username, "system:open-cluster-management:"+cluster.Name) { + klog.Info("CSR approved") + return true + } else { + klog.Info("CSR not approved due to illegal requester", "requester", csr.Spec.Username) + return false + } + } +} + +// UnionCSRApprover is a union func for multiple approvers +func UnionCSRApprover(approvers ...agent.CSRApproveFunc) agent.CSRApproveFunc { + return func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, csr *certificatesv1.CertificateSigningRequest) bool { + for _, approver := range approvers { + if !approver(cluster, addon, csr) { + return false + } + } + + return true + } +} + +// IsCSRSupported checks whether the cluster supports v1 or v1beta1 csr api. +func IsCSRSupported(nativeClient kubernetes.Interface) (bool, bool, error) { + mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(nativeClient.Discovery())) + mappings, err := mapper.RESTMappings(schema.GroupKind{ + Group: certificatesv1.GroupName, + Kind: "CertificateSigningRequest", + }) + if err != nil { + return false, false, err + } + v1CSRSupported := false + for _, mapping := range mappings { + if mapping.GroupVersionKind.Version == "v1" { + v1CSRSupported = true + } + } + v1beta1CSRSupported := false + for _, mapping := range mappings { + if mapping.GroupVersionKind.Version == "v1beta1" { + v1beta1CSRSupported = true + } + } + return v1CSRSupported, v1beta1CSRSupported, nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/gvr.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/gvr.go new file mode 100644 index 000000000..457318d51 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/gvr.go @@ -0,0 +1,50 @@ +package utils + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var AddOnDeploymentConfigGVR = schema.GroupVersionResource{ + Group: "addon.open-cluster-management.io", + Version: "v1alpha1", + Resource: "addondeploymentconfigs", +} + +var AddOnTemplateGVR = schema.GroupVersionResource{ + Group: "addon.open-cluster-management.io", + Version: "v1alpha1", + Resource: "addontemplates", +} + +var BuiltInAddOnConfigGVRs = map[schema.GroupVersionResource]bool{ + AddOnDeploymentConfigGVR: true, + AddOnTemplateGVR: true, +} + +// ContainGR returns true if the given group resource is in the given map +func ContainGR(gvrs map[schema.GroupVersionResource]bool, group, resource string) bool { + for gvr := range gvrs { + if gvr.Group == group && gvr.Resource == resource { + return true + } + } + return false +} + +// FilterOutTheBuiltInAddOnConfigGVRs returns a new slice of GroupVersionResource that does not contain +// the built-in addOn config GVRs +func FilterOutTheBuiltInAddOnConfigGVRs( + gvrs map[schema.GroupVersionResource]bool) map[schema.GroupVersionResource]bool { + + newGVRs := make(map[schema.GroupVersionResource]bool) + for gvr := range gvrs { + if !isBuiltInAddOnConfigGVR(gvr.Group, 
+			newGVRs[gvr] = true
+		}
+	}
+	return newGVRs
+}
+
+func isBuiltInAddOnConfigGVR(group, resource string) bool {
+	return ContainGR(BuiltInAddOnConfigGVRs, group, resource)
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go
new file mode 100644
index 000000000..78b9d951e
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go
@@ -0,0 +1,370 @@
+package utils
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/klog/v2"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
+
+	"open-cluster-management.io/addon-framework/pkg/agent"
+)
+
+func MergeRelatedObjects(modified *bool, objs *[]addonapiv1alpha1.ObjectReference, obj addonapiv1alpha1.ObjectReference) {
+	if *objs == nil {
+		*objs = []addonapiv1alpha1.ObjectReference{}
+	}
+
+	for _, o := range *objs {
+		if o.Group == obj.Group && o.Resource == obj.Resource && o.Name == obj.Name && o.Namespace == obj.Namespace {
+			return
+		}
+	}
+
+	*objs = append(*objs, obj)
+	*modified = true
+}
+
+// ApplyConfigMap merges objectmeta and applies the required data; adapted from openshift/library-go
+func ApplyConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) {
+	existing, err := client.ConfigMaps(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.ConfigMaps(requiredCopy.Namespace).
+			Create(ctx, requiredCopy, metav1.CreateOptions{})
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+
+	var modifiedKeys []string
+	for existingCopyKey, existingCopyValue := range existingCopy.Data {
+		// if we're injecting a ca-bundle and the required isn't forcing the value, then don't use the value of existing
+		// to drive a diff detection. If required has set the value then we need to force the value in order to have apply
+		// behave predictably.
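+		// In other words, a data key counts as modified when required omits it or
+		// carries a different value.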
+		if requiredValue, ok := required.Data[existingCopyKey]; !ok || (existingCopyValue != requiredValue) {
+			modifiedKeys = append(modifiedKeys, "data."+existingCopyKey)
+		}
+	}
+	for existingCopyKey, existingCopyBinValue := range existingCopy.BinaryData {
+		if requiredBinValue, ok := required.BinaryData[existingCopyKey]; !ok || !bytes.Equal(existingCopyBinValue, requiredBinValue) {
+			modifiedKeys = append(modifiedKeys, "binaryData."+existingCopyKey)
+		}
+	}
+	for requiredKey := range required.Data {
+		if _, ok := existingCopy.Data[requiredKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "data."+requiredKey)
+		}
+	}
+	for requiredBinKey := range required.BinaryData {
+		if _, ok := existingCopy.BinaryData[requiredBinKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "binaryData."+requiredBinKey)
+		}
+	}
+
+	dataSame := len(modifiedKeys) == 0
+	if dataSame {
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+	existingCopy.BinaryData = required.BinaryData
+
+	actual, err := client.ConfigMaps(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+
+	return actual, true, err
+}
+
+// ApplySecret merges objectmeta and applies the required data; adapted from openshift/library-go
+func ApplySecret(ctx context.Context, client coreclientv1.SecretsGetter, requiredInput *corev1.Secret) (*corev1.Secret, bool, error) {
+	// copy the stringData to data. Error on a data content conflict inside required. This is usually a bug.
+
+	existing, err := client.Secrets(requiredInput.Namespace).Get(ctx, requiredInput.Name, metav1.GetOptions{})
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, false, err
+	}
+
+	required := requiredInput.DeepCopy()
+	if required.Data == nil {
+		required.Data = map[string][]byte{}
+	}
+	for k, v := range required.StringData {
+		if dataV, ok := required.Data[k]; ok {
+			if string(dataV) != v {
+				return nil, false, fmt.Errorf("Secret.stringData[%q] conflicts with Secret.data[%q]", k, k)
+			}
+		}
+		required.Data[k] = []byte(v)
+	}
+	required.StringData = nil
+
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.Secrets(requiredCopy.Namespace).
+			Create(ctx, requiredCopy, metav1.CreateOptions{})
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+
+	switch required.Type {
+	case corev1.SecretTypeServiceAccountToken:
+		// Secrets for ServiceAccountTokens will have data injected by kube controller manager.
+		// We will apply only the explicitly set keys.
+		if existingCopy.Data == nil {
+			existingCopy.Data = map[string][]byte{}
+		}
+
+		for k, v := range required.Data {
+			existingCopy.Data[k] = v
+		}
+
+	default:
+		existingCopy.Data = required.Data
+	}
+
+	existingCopy.Type = required.Type
+
+	// Server defaults some values and we need to do it as well or it will never equal.
+	if existingCopy.Type == "" {
+		existingCopy.Type = corev1.SecretTypeOpaque
+	}
+
+	if equality.Semantic.DeepEqual(existingCopy, existing) {
+		return existing, false, nil
+	}
+
+	var actual *corev1.Secret
+	/*
+	 * Kubernetes validation silently hides failures to update secret type.
+	 * https://github.com/kubernetes/kubernetes/blob/98e65951dccfd40d3b4f31949c2ab8df5912d93e/pkg/apis/core/validation/validation.go#L5048
+	 * We need to explicitly opt for delete+create in that case.
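+	 * If the API server instead rejects the update with a "field is immutable"
+	 * error, the code below falls back to delete+create.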
+	 */
+	if existingCopy.Type == existing.Type {
+		actual, err = client.Secrets(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+
+		if err == nil {
+			return actual, true, nil
+		}
+		if !strings.Contains(err.Error(), "field is immutable") {
+			return actual, true, err
+		}
+	}
+
+	// if the field was immutable on a secret, we're going to be stuck until we delete it. Try to delete and then create
+	deleteErr := client.Secrets(required.Namespace).Delete(ctx, existingCopy.Name, metav1.DeleteOptions{})
+	if deleteErr != nil {
+		return actual, false, deleteErr
+	}
+
+	// clear the RV and track the original actual and error for the return like our create value.
+	existingCopy.ResourceVersion = ""
+	actual, err = client.Secrets(required.Namespace).Create(ctx, existingCopy, metav1.CreateOptions{})
+	return actual, true, err
+}
+
+func MergeOwnerRefs(existing *[]metav1.OwnerReference, required metav1.OwnerReference, removeOwner bool) bool {
+	if *existing == nil {
+		*existing = []metav1.OwnerReference{}
+	}
+
+	existedIndex := 0
+
+	for existedIndex < len(*existing) {
+		if ownerRefMatched(required, (*existing)[existedIndex]) {
+			break
+		}
+		existedIndex++
+	}
+
+	if existedIndex == len(*existing) {
+		// There is no matched ownerref found, append the ownerref
+		// if it is not to be removed.
+		if !removeOwner {
+			*existing = append(*existing, required)
+			return true
+		}
+
+		return false
+	}
+
+	if removeOwner {
+		*existing = append((*existing)[:existedIndex], (*existing)[existedIndex+1:]...)
+		return true
+	}
+
+	if !reflect.DeepEqual(required, (*existing)[existedIndex]) {
+		(*existing)[existedIndex] = required
+		return true
+	}
+
+	return false
+}
+
+func ownerRefMatched(existing, required metav1.OwnerReference) bool {
+	if existing.Name != required.Name {
+		return false
+	}
+
+	if existing.Kind != required.Kind {
+		return false
+	}
+
+	existingGV, err := schema.ParseGroupVersion(existing.APIVersion)
+
+	if err != nil {
+		return false
+	}
+
+	requiredGV, err := schema.ParseGroupVersion(required.APIVersion)
+
+	if err != nil {
+		return false
+	}
+
+	if existingGV.Group != requiredGV.Group {
+		return false
+	}
+
+	return true
+}
+
+func PatchAddonCondition(ctx context.Context, addonClient addonv1alpha1client.Interface, new, old *addonapiv1alpha1.ManagedClusterAddOn) error {
+	if equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) {
+		return nil
+	}
+
+	oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{
+		Status: addonapiv1alpha1.ManagedClusterAddOnStatus{
+			Conditions: old.Status.Conditions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:             new.UID,
+			ResourceVersion: new.ResourceVersion,
+		},
+		Status: addonapiv1alpha1.ManagedClusterAddOnStatus{
+			Conditions: new.Status.Conditions,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
+	if err != nil {
+		return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err)
+	}
+
+	klog.V(2).Infof("Patching addon %s/%s condition with %s", new.Namespace, new.Name, string(patchBytes))
+	_, err = addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(
+		ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
+
+// AddonManagementFilterFunc checks whether an addon should be managed by the addon manager or be self-managed
+type AddonManagementFilterFunc func(cma *addonapiv1alpha1.ClusterManagementAddOn) bool
+
+func ManagedByAddonManager(obj interface{}) bool {
+	accessor, _ := meta.Accessor(obj)
+	annotations := accessor.GetAnnotations()
+	if len(annotations) == 0 {
+		return false
+	}
+
+	value, ok := annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey]
+	if !ok {
+		return false
+	}
+
+	return value == addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue
+}
+
+func ManagedBySelf(agentAddons map[string]agent.AgentAddon) func(obj interface{}) bool {
+	return func(obj interface{}) bool {
+		accessor, _ := meta.Accessor(obj)
+		if _, ok := agentAddons[accessor.GetName()]; !ok {
+			return false
+		}
+
+		annotations := accessor.GetAnnotations()
+
+		if len(annotations) == 0 {
+			return true
+		}
+
+		value, ok := annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey]
+		if !ok {
+			return true
+		}
+
+		return value == addonapiv1alpha1.AddonLifecycleSelfManageAnnotationValue
+	}
+}
+
+func FilterByAddonName(agentAddons map[string]agent.AgentAddon) func(obj interface{}) bool {
+	return func(obj interface{}) bool {
+		accessor, _ := meta.Accessor(obj)
+		_, ok := agentAddons[accessor.GetName()]
+		return ok
+	}
+}
+
+func IsOwnedByCMA(addon *addonapiv1alpha1.ManagedClusterAddOn) bool {
+	for _, owner := range addon.OwnerReferences {
+		if owner.Kind != "ClusterManagementAddOn" {
+			continue
+		}
+		if owner.Name != addon.Name {
+			continue
+		}
+		return true
+	}
+	return false
+}
+
+// GetSpecHash returns the sha256 hash of the spec field of the given object
+func GetSpecHash(obj *unstructured.Unstructured) (string, error) {
+	if obj == nil {
+		return "", fmt.Errorf("object is nil")
+	}
+	spec, ok := obj.Object["spec"]
+	if !ok {
+		return "", fmt.Errorf("object has no spec field")
+	}
+
+	specBytes, err := json.Marshal(spec)
+	if err != nil {
+		return "", err
+	}
+
+	hash := sha256.Sum256(specBytes)
+
+	return fmt.Sprintf("%x", hash), nil
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go
new file mode 100644
index 000000000..8108fd034
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go
@@ -0,0 +1,354 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+
+	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+	"k8s.io/utils/pointer"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/agent"
+)
+
+const (
+	RoleRefKindUser = "User"
+)
+
+// RBACPermissionBuilder builds an agent.PermissionConfigFunc that applies Kubernetes RBAC policies.
+type RBACPermissionBuilder interface {
+	// BindClusterRoleToUser is a shortcut that ensures a cluster role exists and binds it to a hub user.
+	BindClusterRoleToUser(clusterRole *rbacv1.ClusterRole, username string) RBACPermissionBuilder
+	// BindClusterRoleToGroup is a shortcut that ensures a cluster role exists and binds it to a hub user group.
+	BindClusterRoleToGroup(clusterRole *rbacv1.ClusterRole, userGroup string) RBACPermissionBuilder
+	// BindRoleToUser is a shortcut that ensures a role exists and binds it to a hub user.
+	BindRoleToUser(role *rbacv1.Role, username string) RBACPermissionBuilder
+	// BindRoleToGroup is a shortcut that ensures a role exists and binds it to a hub user group.
+	BindRoleToGroup(role *rbacv1.Role, userGroup string) RBACPermissionBuilder
+
+	// WithStaticClusterRole ensures a cluster role on the hub cluster.
+	WithStaticClusterRole(clusterRole *rbacv1.ClusterRole) RBACPermissionBuilder
+	// WithStaticClusterRoleBinding ensures a cluster role binding on the hub cluster.
+	WithStaticClusterRoleBinding(binding *rbacv1.ClusterRoleBinding) RBACPermissionBuilder
+	// WithStaticRole ensures a role on the hub cluster.
+	WithStaticRole(role *rbacv1.Role) RBACPermissionBuilder
+	// WithStaticRoleBinding ensures a role binding on the hub cluster.
+	WithStaticRoleBinding(binding *rbacv1.RoleBinding) RBACPermissionBuilder
+
+	// Build wraps up the builder chain and returns an agent.PermissionConfigFunc.
+	Build() agent.PermissionConfigFunc
+}
+
+var _ RBACPermissionBuilder = &permissionBuilder{}
+
+type permissionBuilder struct {
+	kubeClient kubernetes.Interface
+	u          *unionPermissionBuilder
+}
+
+// NewRBACPermissionConfigBuilder instantiates a default RBACPermissionBuilder.
+func NewRBACPermissionConfigBuilder(kubeClient kubernetes.Interface) RBACPermissionBuilder {
+	return &permissionBuilder{
+		u:          &unionPermissionBuilder{},
+		kubeClient: kubeClient,
+	}
+}
+
+func (p *permissionBuilder) BindClusterRoleToUser(clusterRole *rbacv1.ClusterRole, username string) RBACPermissionBuilder {
+	return p.WithStaticClusterRole(clusterRole).
+		WithStaticClusterRoleBinding(&rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: clusterRole.Name, // The same name as the cluster role
+			},
+			RoleRef: rbacv1.RoleRef{
+				Kind: "ClusterRole",
+				Name: clusterRole.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind: rbacv1.UserKind,
+					Name: username,
+				},
+			},
+		})
+}
+
+func (p *permissionBuilder) BindClusterRoleToGroup(clusterRole *rbacv1.ClusterRole, userGroup string) RBACPermissionBuilder {
+	return p.WithStaticClusterRole(clusterRole).
+		WithStaticClusterRoleBinding(&rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: clusterRole.Name, // The same name as the cluster role
+			},
+			RoleRef: rbacv1.RoleRef{
+				Kind: "ClusterRole",
+				Name: clusterRole.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind: rbacv1.GroupKind,
+					Name: userGroup,
+				},
+			},
+		})
+}
+
+func (p *permissionBuilder) BindRoleToUser(role *rbacv1.Role, username string) RBACPermissionBuilder {
+	return p.WithStaticRole(role).
+		WithStaticRoleBinding(&rbacv1.RoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: role.Name, // The same name as the role
+			},
+			RoleRef: rbacv1.RoleRef{
+				Kind: "Role",
+				Name: role.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind: rbacv1.UserKind,
+					Name: username,
+				},
+			},
+		})
+}
+
+func (p *permissionBuilder) BindRoleToGroup(role *rbacv1.Role, userGroup string) RBACPermissionBuilder {
+	return p.WithStaticRole(role).
+		WithStaticRoleBinding(&rbacv1.RoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: role.Name, // The same name as the role
+			},
+			RoleRef: rbacv1.RoleRef{
+				Kind: "Role",
+				Name: role.Name,
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind: rbacv1.GroupKind,
+					Name: userGroup,
+				},
+			},
+		})
+}
+
+func (p *permissionBuilder) WithStaticClusterRole(clusterRole *rbacv1.ClusterRole) RBACPermissionBuilder {
+	p.u.fns = append(p.u.fns, func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+		_, _, err := ApplyClusterRole(context.TODO(), p.kubeClient.RbacV1(), clusterRole)
+		return err
+	})
+	return p
+}
+
+func (p *permissionBuilder) WithStaticClusterRoleBinding(binding *rbacv1.ClusterRoleBinding) RBACPermissionBuilder {
+	p.u.fns = append(p.u.fns, func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+		_, _, err := ApplyClusterRoleBinding(context.TODO(), p.kubeClient.RbacV1(), binding)
+		return err
+	})
+	return p
+}
+
+func (p *permissionBuilder) WithStaticRole(role *rbacv1.Role) RBACPermissionBuilder {
+	p.u.fns = append(p.u.fns, func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+		role.Namespace = cluster.Name
+		ensureAddonOwnerReference(&role.ObjectMeta, addon)
+		_, _, err := ApplyRole(context.TODO(), p.kubeClient.RbacV1(), role)
+		return err
+	})
+	return p
+}
+
+func (p *permissionBuilder) WithStaticRoleBinding(binding *rbacv1.RoleBinding) RBACPermissionBuilder {
+	p.u.fns = append(p.u.fns, func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+		binding.Namespace = cluster.Name
+		ensureAddonOwnerReference(&binding.ObjectMeta, addon)
+		_, _, err := ApplyRoleBinding(context.TODO(), p.kubeClient.RbacV1(), binding)
+		return err
+	})
+	return p
+}
+
+func (p *permissionBuilder) Build() agent.PermissionConfigFunc {
+	return p.u.build()
+}
+
+type unionPermissionBuilder struct {
+	fns []agent.PermissionConfigFunc
+}
+
+func (b *unionPermissionBuilder) build() agent.PermissionConfigFunc {
+	return func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error {
+		for _, fn := range b.fns {
+			if err := fn(cluster, addon); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+}
+
+func ensureAddonOwnerReference(metadata *metav1.ObjectMeta, addon *addonapiv1alpha1.ManagedClusterAddOn) {
+	metadata.OwnerReferences = []metav1.OwnerReference{
+		{
+			APIVersion:         addonapiv1alpha1.GroupVersion.String(),
+			Kind:               "ManagedClusterAddOn",
+			Name:               addon.Name,
+			BlockOwnerDeletion: pointer.Bool(true),
+			UID:                addon.UID,
+		},
+	}
+}
+
+// ApplyClusterRole merges objectmeta and applies the required rules; aggregation rules are not allowed for now.
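+// A minimal usage sketch (the role name and rules below are hypothetical):
+//
+//	required := &rbacv1.ClusterRole{
+//		ObjectMeta: metav1.ObjectMeta{Name: "my-addon-reader"},
+//		Rules: []rbacv1.PolicyRule{
+//			{APIGroups: []string{""}, Resources: []string{"configmaps"}, Verbs: []string{"get", "list"}},
+//		},
+//	}
+//	_, changed, err := ApplyClusterRole(ctx, kubeClient.RbacV1(), required)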
+func ApplyClusterRole(ctx context.Context, client rbacclientv1.ClusterRolesGetter, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
+	if required.AggregationRule != nil && len(required.AggregationRule.ClusterRoleSelectors) != 0 {
+		return nil, false, fmt.Errorf("cannot create an aggregated cluster role")
+	}
+
+	existing, err := client.ClusterRoles().Get(ctx, required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.ClusterRoles().Create(
+			ctx, requiredCopy, metav1.CreateOptions{})
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+	contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+	if contentSame {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Rules = required.Rules
+	existingCopy.AggregationRule = nil
+
+	actual, err := client.ClusterRoles().Update(ctx, existingCopy, metav1.UpdateOptions{})
+	return actual, true, err
+}
+
+// ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyClusterRoleBinding(ctx context.Context,
+	client rbacclientv1.ClusterRoleBindingsGetter,
+	required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
+	existing, err := client.ClusterRoleBindings().Get(ctx, required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.ClusterRoleBindings().Create(
+			ctx, requiredCopy, metav1.CreateOptions{})
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+	requiredCopy := required.DeepCopy()
+
+	// Enforce apiGroup fields in roleRefs
+	existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range existingCopy.Subjects {
+		if existingCopy.Subjects[i].Kind == RoleRefKindUser {
+			existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range requiredCopy.Subjects {
+		if requiredCopy.Subjects[i].Kind == RoleRefKindUser {
+			requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+	roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+	if subjectsAreSame && roleRefIsSame {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Subjects = requiredCopy.Subjects
+	existingCopy.RoleRef = requiredCopy.RoleRef
+
+	actual, err := client.ClusterRoleBindings().Update(ctx, existingCopy, metav1.UpdateOptions{})
+	return actual, true, err
+}
+
+// ApplyRole merges objectmeta, requires rules
+func ApplyRole(ctx context.Context, client rbacclientv1.RolesGetter, required *rbacv1.Role) (*rbacv1.Role, bool, error) {
+	existing, err := client.Roles(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.Roles(required.Namespace).Create(
+			ctx, requiredCopy, metav1.CreateOptions{})
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+
+	contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+	if contentSame {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Rules = required.Rules
+
+	actual, err := client.Roles(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+	return actual, true, err
+}
+
+// ApplyRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyRoleBinding(ctx context.Context, client rbacclientv1.RoleBindingsGetter, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
+	existing, err := client.RoleBindings(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.RoleBindings(required.Namespace).Create(
+			ctx, requiredCopy, metav1.CreateOptions{})
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+	requiredCopy := required.DeepCopy()
+
+	// Enforce apiGroup fields in roleRefs and subjects
+	existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range existingCopy.Subjects {
+		if existingCopy.Subjects[i].Kind == RoleRefKindUser {
+			existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range requiredCopy.Subjects {
+		if requiredCopy.Subjects[i].Kind == RoleRefKindUser {
+			requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+	roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+	if subjectsAreSame && roleRefIsSame {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Subjects = requiredCopy.Subjects
+	existingCopy.RoleRef = requiredCopy.RoleRef
+
+	actual, err := client.RoleBindings(requiredCopy.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+	return actual, true, err
+}
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go
new file mode 100644
index 000000000..2dd4c1323
--- /dev/null
+++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go
@@ -0,0 +1,80 @@
+package utils
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/types"
+	workapiv1 "open-cluster-management.io/api/work/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/agent"
+)
+
+// DeploymentProber checks the addon status based on the status of the agent deployments.
+type DeploymentProber struct {
+	deployments []types.NamespacedName
+}
+
+func NewDeploymentProber(deployments ...types.NamespacedName) *agent.HealthProber {
+	probeFields := []agent.ProbeField{}
+	for _, deploy := range deployments {
+		probeFields = append(probeFields, agent.ProbeField{
+			ResourceIdentifier: workapiv1.ResourceIdentifier{
+				Group:     "apps",
+				Resource:  "deployments",
+				Name:      deploy.Name,
+				Namespace: deploy.Namespace,
+			},
+			ProbeRules: []workapiv1.FeedbackRule{
+				{
+					Type: workapiv1.WellKnownStatusType,
+				},
+			},
+		})
+	}
+	return &agent.HealthProber{
+		Type: agent.HealthProberTypeWork,
+		WorkProber: &agent.WorkHealthProber{
+			ProbeFields: probeFields,
+			HealthCheck: HealthCheck,
+		},
+	}
+}
+
+func (d *DeploymentProber) ProbeFields() []agent.ProbeField {
+	probeFields := []agent.ProbeField{}
+	for _, deploy := range d.deployments {
+		probeFields = append(probeFields, agent.ProbeField{
+			ResourceIdentifier: workapiv1.ResourceIdentifier{
+				Group:     "apps",
+				Resource:  "deployments",
+				Name:      deploy.Name,
+				Namespace: deploy.Namespace,
+			},
+			ProbeRules: []workapiv1.FeedbackRule{
+				{
+					Type: workapiv1.WellKnownStatusType,
+				},
+			},
+		})
+	}
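+	// Each deployment is probed via the WellKnownStatus feedback rule, which
+	// surfaces standard status fields such as ReadyReplicas for HealthCheck below.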
+	return probeFields
+}
+
+func HealthCheck(identifier workapiv1.ResourceIdentifier, result workapiv1.StatusFeedbackResult) error {
+	if len(result.Values) == 0 {
+		return fmt.Errorf("no values are probed for deployment %s/%s", identifier.Namespace, identifier.Name)
+	}
+	for _, value := range result.Values {
+		if value.Name != "ReadyReplicas" {
+			continue
+		}
+
+		if *value.Value.Integer >= 1 {
+			return nil
+		}
+
+		return fmt.Errorf("readyReplicas is %d for deployment %s/%s", *value.Value.Integer, identifier.Namespace, identifier.Name)
+	}
+	return fmt.Errorf("readyReplicas is not probed")
+}
diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go b/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go
new file mode 100644
index 000000000..d186d08ea
--- /dev/null
+++ b/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go
@@ -0,0 +1,376 @@
+package workbuilder
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/errors"
+	"open-cluster-management.io/api/utils/work/v1/workapplier"
+	workapiv1 "open-cluster-management.io/api/work/v1"
+)
+
+// DefaultManifestLimit is the max size, in bytes, of the manifests data; 50k by default.
+const DefaultManifestLimit = 50 * 1024
+
+// DefaultManifestThreshold is the fraction of the manifest limit used when filling a newly created manifestWork.
+const DefaultManifestThreshold = 0.8
+
+type manifestKey struct {
+	gvk             schema.GroupVersionKind
+	namespace, name string
+}
+
+type manifestSize struct {
+	manifest workapiv1.Manifest
+	// size is the size of the manifest
+	size int
+}
+
+type manifestWorkBuffer struct {
+	work *workapiv1.ManifestWork
+	// buffer is the remaining capacity for appending manifests to the manifestWork
+	buffer int
+}
+
+type requiredManifestMapper map[manifestKey]workapiv1.Manifest
+
+// GenerateManifestWorkObjectMeta generates the ObjectMeta for a new manifestWork; the caller must ensure the generated name is unique.
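+// An illustrative generator (the naming scheme and clusterName variable are hypothetical):
+//
+//	func(index int) metav1.ObjectMeta {
+//		return metav1.ObjectMeta{
+//			Name:      fmt.Sprintf("my-addon-work-%d", index),
+//			Namespace: clusterName,
+//		}
+//	}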
+type GenerateManifestWorkObjectMeta func(index int) metav1.ObjectMeta
+
+type WorkBuilder struct {
+	manifestLimit int
+}
+
+type internalWorkBuilder struct {
+	workBuilder                    *WorkBuilder
+	generateManifestWorkObjectMeta GenerateManifestWorkObjectMeta
+	deletionOption                 *workapiv1.DeleteOption
+	executorOption                 *workapiv1.ManifestWorkExecutor
+	existingManifestWorks          []workapiv1.ManifestWork
+	manifestConfigOption           []workapiv1.ManifestConfigOption
+	annotations                    map[string]string
+}
+type WorkBuilderOption func(*internalWorkBuilder) *internalWorkBuilder
+
+func ExistingManifestWorksOption(works []workapiv1.ManifestWork) WorkBuilderOption {
+	return func(factory *internalWorkBuilder) *internalWorkBuilder {
+		factory.existingManifestWorks = works
+		return factory
+	}
+}
+
+func DeletionOption(option *workapiv1.DeleteOption) WorkBuilderOption {
+	return func(builder *internalWorkBuilder) *internalWorkBuilder {
+		builder.deletionOption = option
+		return builder
+	}
+}
+
+func ManifestConfigOption(option []workapiv1.ManifestConfigOption) WorkBuilderOption {
+	return func(builder *internalWorkBuilder) *internalWorkBuilder {
+		builder.manifestConfigOption = option
+		return builder
+	}
+}
+
+func ManifestWorkExecutorOption(executor *workapiv1.ManifestWorkExecutor) WorkBuilderOption {
+	return func(builder *internalWorkBuilder) *internalWorkBuilder {
+		builder.executorOption = executor
+		return builder
+	}
+}
+
+func ManifestAnnotations(annotations map[string]string) WorkBuilderOption {
+	return func(builder *internalWorkBuilder) *internalWorkBuilder {
+		builder.annotations = annotations
+		return builder
+	}
+}
+
+func NewWorkBuilder() *WorkBuilder {
+	return &WorkBuilder{
+		manifestLimit: int(float64(DefaultManifestLimit) * DefaultManifestThreshold),
+	}
+}
+
+// WithManifestsLimit sets the total size limit, in bytes, of the manifests in a manifestWork.
+// The actual usable size is 80% of the limit.
+// This is an optional setting; the default limit is 50k.
+func (w *WorkBuilder) WithManifestsLimit(manifestsLimit int) *WorkBuilder {
+	w.manifestLimit = int(float64(manifestsLimit) * DefaultManifestThreshold)
+	return w
+}
+
+func (w *WorkBuilder) newInternalWorkBuilder(generateManifestWorkObjectMeta GenerateManifestWorkObjectMeta,
+	options ...WorkBuilderOption) *internalWorkBuilder {
+	factory := &internalWorkBuilder{
+		workBuilder:                    w,
+		generateManifestWorkObjectMeta: generateManifestWorkObjectMeta,
+	}
+
+	for _, opt := range options {
+		factory = opt(factory)
+	}
+	return factory
+}
+
+// Build builds the sets of manifestWorks to apply and to delete from the given objects and options.
+func (w *WorkBuilder) Build(objects []runtime.Object,
+	generateManifestWorkObjectMeta GenerateManifestWorkObjectMeta,
+	options ...WorkBuilderOption) (applied, deleted []*workapiv1.ManifestWork, err error) {
+	builder := w.newInternalWorkBuilder(generateManifestWorkObjectMeta, options...)
+	return builder.buildManifestWorks(objects)
+}
+
+// BuildAndApply builds a set of manifestWorks from the objects and updates the existing manifestWorks.
+func (w *WorkBuilder) BuildAndApply(ctx context.Context,
+	objects []runtime.Object,
+	generateManifestWorkObjectMeta GenerateManifestWorkObjectMeta,
+	workApplier *workapplier.WorkApplier,
+	options ...WorkBuilderOption) error {
+	appliedWorks, deletedWorks, err := w.Build(objects, generateManifestWorkObjectMeta, options...)
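+	// Apply all rebuilt works and delete works left with no manifests, collecting
+	// errors so a single failure does not stop the remaining applies and deletes.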
+	if err != nil {
+		return err
+	}
+
+	var errs []error
+	for _, work := range appliedWorks {
+		if _, err = workApplier.Apply(ctx, work); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	for _, work := range deletedWorks {
+		if err = workApplier.Delete(ctx, work.Namespace, work.Name); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) != 0 {
+		return errors.NewAggregate(errs)
+	}
+	return nil
+}
+
+func (f *internalWorkBuilder) buildManifestWorks(objects []runtime.Object) (appliedWorks, deletedWorks []*workapiv1.ManifestWork, err error) {
+	var updatedWorks []manifestWorkBuffer
+
+	requiredMapper, err := generateRequiredManifestMapper(objects)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to generate required manifest mapper: %v", err)
+	}
+
+	// this step updates the existing manifestWorks: refresh the manifests that still exist and drop those that do not
+	for _, existingWork := range f.existingManifestWorks {
+		// create a work with initialized meta, keeping the existing work name.
+		requiredWork := f.initManifestWorkWithName(existingWork.Name)
+
+		for _, manifest := range existingWork.Spec.Workload.Manifests {
+			key, err := generateManifestKey(manifest)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			// currently, we have an 80% threshold for the size of manifests, so update directly.
+			// TODO: need to consider the case where the size of the updated manifests exceeds the manifestWork limit.
+			if _, ok := requiredMapper[key]; ok {
+				requiredWork.Spec.Workload.Manifests = append(requiredWork.Spec.Workload.Manifests, requiredMapper[key])
+				delete(requiredMapper, key)
+				continue
+			}
+		}
+		updatedWorks = append(updatedWorks, manifestWorkBuffer{
+			work:   requiredWork,
+			buffer: f.bufferOfManifestWork(requiredWork),
+		})
+	}
+
+	// handle newly added manifests
+	if len(requiredMapper) != 0 {
+		var requiredManifests []manifestSize
+		for _, manifest := range requiredMapper {
+			requiredManifests = append(requiredManifests, manifestSize{manifest: manifest, size: manifest.Size()})
+		}
+
+		// sort from big to small by size
+		sort.SliceStable(requiredManifests, func(i, j int) bool {
+			return requiredManifests[j].size < requiredManifests[i].size
+		})
+
+		var newManifests []workapiv1.Manifest
+		// each manifest is filled into the existing work with the largest remaining buffer;
+		// if it does not fit anywhere, it goes into a new work.
+		for manifestIndex := 0; manifestIndex < len(requiredManifests); manifestIndex++ {
+			maxWorkBuffer := getMaxManifestWorkBuffer(updatedWorks)
+			if maxWorkBuffer != nil && requiredManifests[manifestIndex].size < maxWorkBuffer.buffer {
+				maxWorkBuffer.work.Spec.Workload.Manifests = append(maxWorkBuffer.work.Spec.Workload.Manifests,
+					requiredManifests[manifestIndex].manifest)
+				maxWorkBuffer.buffer = maxWorkBuffer.buffer - requiredManifests[manifestIndex].size
+				continue
+			}
+
+			newManifests = append(newManifests, requiredManifests[manifestIndex].manifest)
+		}
+
+		if len(newManifests) != 0 {
+			newWorks, err := f.newManifestWorks(newManifests, len(updatedWorks))
+			if err != nil {
+				return nil, nil, err
+			}
+			appliedWorks = append(appliedWorks, newWorks...)
+		}
+	}
+
+	for workIndex := 0; workIndex < len(updatedWorks); workIndex++ {
+		if len(updatedWorks[workIndex].work.Spec.Workload.Manifests) == 0 {
+			deletedWorks = append(deletedWorks, updatedWorks[workIndex].work)
+			continue
+		}
+		appliedWorks = append(appliedWorks, updatedWorks[workIndex].work)
+	}
+
+	return appliedWorks, deletedWorks, nil
+}
+
+func (f *internalWorkBuilder) newManifestWorks(manifests []workapiv1.Manifest, workIndex int) ([]*workapiv1.ManifestWork, error) {
+	var manifestWorks []*workapiv1.ManifestWork
+	var totalSize = 0
+
+	work := f.initManifestWork(workIndex)
+	manifestWorks = append(manifestWorks, work)
+	for i := 0; i < len(manifests); i++ {
+		if totalSize+manifests[i].Size() < f.workBuilder.manifestLimit {
+			work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, manifests[i])
+			totalSize = totalSize + manifests[i].Size()
+			continue
+		}
+
+		workIndex = workIndex + 1
+		work = f.initManifestWork(workIndex)
+		manifestWorks = append(manifestWorks, work)
+		work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, manifests[i])
+		totalSize = manifests[i].Size()
+	}
+
+	return manifestWorks, nil
+}
+
+func (f *internalWorkBuilder) initManifestWork(index int) *workapiv1.ManifestWork {
+	work := &workapiv1.ManifestWork{
+		ObjectMeta: f.generateManifestWorkObjectMeta(index),
+	}
+	f.setManifestWorkOptions(work)
+	f.setAnnotations(work)
+	return work
+}
+
+// init a work with an existing name
+func (f *internalWorkBuilder) initManifestWorkWithName(workName string) *workapiv1.ManifestWork {
+	work := f.initManifestWork(0)
+	work.SetName(workName)
+	return work
+}
+
+func (f *internalWorkBuilder) setManifestWorkOptions(work *workapiv1.ManifestWork) {
+	// currently we set the options on each generated manifestWork
+	work.Spec.DeleteOption = f.deletionOption
+	work.Spec.ManifestConfigs = f.manifestConfigOption
+	work.Spec.Executor = f.executorOption
+}
+
+func (f *internalWorkBuilder) setAnnotations(work *workapiv1.ManifestWork) {
+	work.SetAnnotations(f.annotations)
+}
+
+func (f *internalWorkBuilder) bufferOfManifestWork(work *workapiv1.ManifestWork) int {
+	totalSize := 0
+	for _, manifest := range work.Spec.Workload.Manifests {
+		totalSize = totalSize + manifest.Size()
+	}
+	if totalSize > f.workBuilder.manifestLimit {
+		return 0
+	}
+	return f.workBuilder.manifestLimit - totalSize
+}
+
+func getMaxManifestWorkBuffer(workBuffers []manifestWorkBuffer) *manifestWorkBuffer {
+	var maxWorkBuffer *manifestWorkBuffer
+	maxBuffer := 0
+	for key, workBuffer := range workBuffers {
+		if workBuffer.buffer > maxBuffer {
+			maxWorkBuffer = &workBuffers[key]
+			maxBuffer = workBuffer.buffer
+		}
+	}
+	return maxWorkBuffer
+}
+
+func buildManifest(object runtime.Object) (workapiv1.Manifest, error) {
+	rawObject, err := runtime.Encode(unstructured.UnstructuredJSONScheme, object)
+	if err != nil {
+		return workapiv1.Manifest{}, fmt.Errorf("failed to encode object %v, err: %v", object, err)
+	}
+	manifest := workapiv1.Manifest{RawExtension: runtime.RawExtension{Raw: rawObject}}
+	return manifest, nil
+}
+
+func generateManifestKey(manifest workapiv1.Manifest) (manifestKey, error) {
+	var object runtime.Object
+	var err error
+	if manifest.Object != nil {
+		object = manifest.Object
+	} else {
+		object, err = runtime.Decode(unstructured.UnstructuredJSONScheme, manifest.Raw)
+		if err != nil {
+			return manifestKey{}, err
+		}
+	}
+
+	gvk := object.GetObjectKind().GroupVersionKind()
+	if gvk.Kind == "" || gvk.Version == "" {
+		return manifestKey{}, fmt.Errorf("got empty kind/version from object %v", object)
+	}
+	key := manifestKey{
+		gvk: gvk,
+	}
+
+	accessor, err := meta.Accessor(object)
+	if err != nil {
+		return key, err
+	}
+	key.namespace = accessor.GetNamespace()
+	key.name = accessor.GetName()
+	return key, nil
+}
+
+func generateRequiredManifestMapper(objects []runtime.Object) (requiredManifestMapper, error) {
+	var mapper = requiredManifestMapper{}
+	for _, object := range objects {
+		rawObject, err := runtime.Encode(unstructured.UnstructuredJSONScheme, object)
+		if err != nil {
+			return nil, fmt.Errorf("failed to encode object %v, err: %v", object, err)
+		}
+
+		gvk := object.GetObjectKind().GroupVersionKind()
+		if gvk.Kind == "" || gvk.Version == "" {
+			return nil, fmt.Errorf("got empty kind/version from object %v", object)
+		}
+		key := manifestKey{
+			gvk: gvk,
+		}
+		accessor, err := meta.Accessor(object)
+		if err != nil {
+			return nil, err
+		}
+		key.namespace = accessor.GetNamespace()
+		key.name = accessor.GetName()
+		mapper[key] = workapiv1.Manifest{RawExtension: runtime.RawExtension{Raw: rawObject}}
+	}
+	return mapper, nil
+}