Mirror of https://github.com/rancher/k3k.git (synced 2026-03-17 09:00:35 +00:00)

Compare commits: bump-chart...renovate/g (8 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 5feede357a | |
| | fcb05793b1 | |
| | 83b4415f02 | |
| | cd72bcbc15 | |
| | 9836f8376d | |
| | dba054786e | |
| | c94f7c7a30 | |
| | 1a16527750 | |
@@ -935,6 +935,29 @@ spec:
                - Terminating
                - Unknown
                type: string
              policy:
                description: |-
                  policy represents the status of the policy applied to this cluster.
                  This field is set by the VirtualClusterPolicy controller.
                properties:
                  name:
                    description: name is the name of the VirtualClusterPolicy currently
                      applied to this cluster.
                    minLength: 1
                    type: string
                  nodeSelector:
                    additionalProperties:
                      type: string
                    description: nodeSelector is a node selector enforced by the active
                      VirtualClusterPolicy.
                    type: object
                  priorityClass:
                    description: priorityClass is the priority class enforced by the
                      active VirtualClusterPolicy.
                    type: string
                required:
                - name
                type: object
              policyName:
                description: PolicyName specifies the virtual cluster policy name
                  bound to the virtual cluster.
@@ -41,6 +41,29 @@ _Appears In:_
|===


[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy"]
=== AppliedPolicy


AppliedPolicy defines the observed state of an applied policy.


_Appears In:_

* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]

[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`name`* __string__ | name is the name of the VirtualClusterPolicy currently applied to this cluster. + | | MinLength: 1 +

| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
|===


[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
=== Cluster
@@ -32,6 +32,24 @@ _Appears in:_
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |


#### AppliedPolicy


AppliedPolicy defines the observed state of an applied policy.


_Appears in:_
- [ClusterStatus](#clusterstatus)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
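For orientation, the table above maps onto a Go API type roughly like the sketch below. It is inferred only from the documented fields and validation markers, so the actual declaration in pkg/apis/k3k.io/v1beta1 may differ in details:

```go
// Sketch of the AppliedPolicy type implied by the table above; the kubebuilder
// markers mirror the documented validation (name required, MinLength: 1).
package v1beta1

// AppliedPolicy defines the observed state of an applied policy.
type AppliedPolicy struct {
	// name is the name of the VirtualClusterPolicy currently applied to this cluster.
	// +kubebuilder:validation:MinLength=1
	Name string `json:"name"`

	// priorityClass is the priority class enforced by the active VirtualClusterPolicy.
	// +optional
	PriorityClass string `json:"priorityClass,omitempty"`

	// nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
```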


#### Cluster


go.mod (46 lines changed)
@@ -1,6 +1,6 @@
module github.com/rancher/k3k

go 1.25
go 1.25.0

toolchain go1.25.6

@@ -17,7 +17,7 @@ require (
	github.com/spf13/pflag v1.0.10
	github.com/spf13/viper v1.21.0
	github.com/stretchr/testify v1.11.1
	github.com/testcontainers/testcontainers-go v0.40.0
	github.com/testcontainers/testcontainers-go v0.41.0
	github.com/testcontainers/testcontainers-go/modules/k3s v0.40.0
	github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
	go.etcd.io/etcd/api/v3 v3.5.21
@@ -40,6 +40,8 @@ require (
	sigs.k8s.io/controller-runtime v0.19.4
)

require github.com/cenkalti/backoff/v5 v5.0.3 // indirect

require (
	cel.dev/expr v0.19.1 // indirect
	dario.cat/mergo v1.0.2 // indirect
@@ -70,10 +72,10 @@ require (
	github.com/cyphar/filepath-securejoin v0.5.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/docker v28.5.1+incompatible // indirect
	github.com/docker/docker v28.5.2+incompatible // indirect
	github.com/docker/go-connections v0.6.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/ebitengine/purego v0.8.4 // indirect
	github.com/ebitengine/purego v0.10.0 // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch v5.9.11+incompatible // indirect
	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
@@ -114,7 +116,7 @@ require (
	github.com/jmoiron/sqlx v1.4.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/compress v1.18.2 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
@@ -130,7 +132,7 @@ require (
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/go-archive v0.1.0 // indirect
	github.com/moby/go-archive v0.2.0 // indirect
	github.com/moby/patternmatcher v0.6.0 // indirect
	github.com/moby/spdystream v0.5.0 // indirect
	github.com/moby/sys/sequential v0.6.0 // indirect
@@ -149,7 +151,7 @@ require (
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
	github.com/prometheus/client_golang v1.22.0 // indirect
	github.com/prometheus/client_model v0.6.2
	github.com/prometheus/common v0.64.0 // indirect
@@ -158,43 +160,43 @@ require (
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/sagikazarmark/locafero v0.11.0 // indirect
	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
	github.com/shirou/gopsutil/v4 v4.25.6 // indirect
	github.com/shirou/gopsutil/v4 v4.26.2 // indirect
	github.com/shopspring/decimal v1.4.0 // indirect
	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
	github.com/spf13/afero v1.15.0 // indirect
	github.com/spf13/cast v1.10.0 // indirect
	github.com/stoewer/go-strcase v1.3.0 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/tklauser/go-sysconf v0.3.12 // indirect
	github.com/tklauser/numcpus v0.6.1 // indirect
	github.com/tklauser/go-sysconf v0.3.16 // indirect
	github.com/tklauser/numcpus v0.11.0 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xlab/treeprint v1.2.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
	go.opentelemetry.io/otel v1.35.0 // indirect
	go.opentelemetry.io/otel v1.41.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
	go.opentelemetry.io/otel/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/sdk v1.33.0 // indirect
	go.opentelemetry.io/otel/trace v1.35.0 // indirect
	go.opentelemetry.io/otel/metric v1.41.0 // indirect
	go.opentelemetry.io/otel/sdk v1.41.0 // indirect
	go.opentelemetry.io/otel/trace v1.41.0 // indirect
	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.yaml.in/yaml/v2 v2.4.2 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/crypto v0.45.0 // indirect
	golang.org/x/crypto v0.48.0 // indirect
	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
	golang.org/x/net v0.47.0 // indirect
	golang.org/x/net v0.49.0 // indirect
	golang.org/x/oauth2 v0.30.0 // indirect
	golang.org/x/sync v0.18.0 // indirect
	golang.org/x/sys v0.38.0 // indirect
	golang.org/x/term v0.37.0 // indirect
	golang.org/x/text v0.31.0 // indirect
	golang.org/x/sync v0.19.0 // indirect
	golang.org/x/sys v0.41.0 // indirect
	golang.org/x/term v0.40.0 // indirect
	golang.org/x/text v0.34.0 // indirect
	golang.org/x/time v0.12.0 // indirect
	golang.org/x/tools v0.38.0 // indirect
	golang.org/x/tools v0.41.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect

go.sum (104 lines changed)
@@ -44,6 +44,8 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuP
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@@ -86,8 +88,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
|
||||
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
|
||||
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
@@ -100,8 +102,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -243,8 +245,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
@@ -290,8 +292,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
|
||||
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
|
||||
github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8=
|
||||
github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
@@ -338,8 +340,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
|
||||
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
@@ -359,8 +361,8 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
|
||||
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
|
||||
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
|
||||
github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
@@ -371,8 +373,8 @@ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEV
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
|
||||
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
|
||||
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
|
||||
@@ -410,14 +412,14 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU=
|
||||
github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY=
|
||||
github.com/testcontainers/testcontainers-go v0.41.0 h1:mfpsD0D36YgkxGj2LrIyxuwQ9i2wCKAD+ESsYM1wais=
|
||||
github.com/testcontainers/testcontainers-go v0.41.0/go.mod h1:pdFrEIfaPl24zmBjerWTTYaY0M6UHsqA1YSvsoU40MI=
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.40.0 h1:3w6SjtIp/+FdpjWJCyPqaGWknG2iU6MacEWA7hl0IqQ=
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.40.0/go.mod h1:1xJwmfO2g+XKox9LiJXKGCm1vWp7LozX+78UjXVRbF0=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
|
||||
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
|
||||
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
|
||||
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803 h1:0O149bxUoQL69b4+pcGaCbKk2bvA/43AhkczkDuRjMc=
|
||||
@@ -450,8 +452,8 @@ go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU
|
||||
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
|
||||
@@ -460,8 +462,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
|
||||
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
|
||||
@@ -474,8 +476,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0/go.mod h1:Izur+Wt8gClgMJqO/cZ8wdeeMryJ/xxiOVgFSSfpDTY=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
|
||||
@@ -486,16 +488,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
|
||||
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
|
||||
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
|
||||
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
|
||||
go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8=
|
||||
go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90=
|
||||
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
|
||||
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y=
|
||||
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
|
||||
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
@@ -511,8 +513,8 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
@@ -521,8 +523,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -532,8 +534,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
@@ -542,8 +544,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -555,16 +557,14 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -575,8 +575,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -100,7 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)

	syncedConfigMap := c.translateConfigMap(&virtualConfigMap)

	if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
	if err := controllerutil.SetOwnerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}


@@ -98,7 +98,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request

	syncedIngress := r.ingress(&virtIngress)

	if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
	if err := controllerutil.SetOwnerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}


@@ -98,7 +98,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
	}

	syncedPVC := r.pvc(&virtPVC)
	if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
	if err := controllerutil.SetOwnerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}


@@ -117,7 +117,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque

	hostPriorityClass := r.translatePriorityClass(priorityClass)

	if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
	if err := controllerutil.SetOwnerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}


@@ -100,7 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re

	syncedSecret := s.translateSecret(&virtualSecret)

	if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
	if err := controllerutil.SetOwnerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}


@@ -76,7 +76,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request

	syncedService := r.service(&virtService)

	if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
	if err := controllerutil.SetOwnerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}
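Every syncer above swaps controllerutil.SetControllerReference for controllerutil.SetOwnerReference. The practical difference, which is standard controller-runtime behaviour rather than anything specific to this change: a controller reference records the owner as the single managing controller (controller: true, blockOwnerDeletion: true) and fails if a different controller owner is already set, while a plain owner reference only ties the object's lifetime to the owner for garbage collection and allows several owners. A minimal, self-contained sketch with hypothetical objects (not k3k code):

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	owner := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "owner", Namespace: "default", UID: "uid-1"}}

	controlled := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "controlled", Namespace: "default"}}
	owned := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "owned", Namespace: "default"}}

	// Controller reference: controller=true, blockOwnerDeletion=true,
	// and at most one controller owner is allowed per object.
	if err := controllerutil.SetControllerReference(owner, controlled, scheme.Scheme); err != nil {
		panic(err)
	}

	// Plain owner reference: garbage collection still applies, but the owner
	// is not recorded as the managing controller.
	if err := controllerutil.SetOwnerReference(owner, owned, scheme.Scheme); err != nil {
		panic(err)
	}

	fmt.Printf("controlled: %+v\n", controlled.GetOwnerReferences())
	fmt.Printf("owned:      %+v\n", owned.GetOwnerReferences())
}
```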
@@ -276,7 +276,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
			cfg.AgentHostname,
			k.port,
			k.agentIP,
			utilProvider.HostClient,
			k.hostMgr,
			utilProvider.VirtualClient,
			k.virtualCluster,
			cfg.Version,

@@ -6,6 +6,7 @@ import (
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -13,13 +14,13 @@ import (
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostClient client.Client, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
	ctx := context.Background()

	if mirrorHostNodes {
		var hostNode corev1.Node
		if err := hostClient.Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
			logger.Error(err, "error getting host node for mirroring", err)
		if err := hostMgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
			logger.Error(err, "error getting host node for mirroring")
		}

		node.Spec = *hostNode.Spec.DeepCopy()
@@ -48,7 +49,7 @@ func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servi
		// configure versions
		node.Status.NodeInfo.KubeletVersion = version

		startNodeCapacityUpdater(ctx, logger, hostClient, virtualClient, virtualCluster, node.Name)
		startNodeCapacityUpdater(ctx, logger, hostMgr.GetClient(), virtualClient, virtualCluster, node.Name)
	}
}
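ConfigureNode now receives the whole manager instead of a bare client and fetches the host node through GetAPIReader(). In controller-runtime, GetClient() serves reads from the manager's informer cache, which is only populated once the manager has started and synced, whereas GetAPIReader() is an uncached reader that queries the API server directly, so it is safe for one-off lookups like this one. A small sketch of the distinction (the setup and the node name are assumptions, not taken from k3k):

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}

	var node corev1.Node

	// Uncached read, straight from the API server: usable even before
	// mgr.Start() has been called and the informer cache has synced.
	// "node-1" is just an example name.
	if err := mgr.GetAPIReader().Get(context.Background(), types.NamespacedName{Name: "node-1"}, &node); err != nil {
		panic(err)
	}

	// Cached client: reads are served from informers, so it should only be
	// used once the manager is running; writes go to the API server as usual.
	_ = mgr.GetClient()
}
```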
@@ -2,6 +2,7 @@ package provider

import (
	"context"
	"maps"
	"sort"
	"time"

@@ -83,11 +84,30 @@ func updateNodeCapacity(ctx context.Context, logger logr.Logger, hostClient clie

	mergedResourceLists := mergeResourceLists(resourceLists...)

	m, err := distributeQuotas(ctx, logger, virtualClient, mergedResourceLists)
	if err != nil {
		logger.Error(err, "error distributing policy quota")
	var virtualNodeList, hostNodeList corev1.NodeList

	if err := virtualClient.List(ctx, &virtualNodeList); err != nil {
		logger.Error(err, "error listing virtual nodes for stable capacity distribution")
	}

	virtResourceMap := make(map[string]corev1.ResourceList)
	for _, vNode := range virtualNodeList.Items {
		virtResourceMap[vNode.Name] = corev1.ResourceList{}
	}

	if err := hostClient.List(ctx, &hostNodeList); err != nil {
		logger.Error(err, "error listing host nodes for stable capacity distribution")
	}

	hostResourceMap := make(map[string]corev1.ResourceList)

	for _, hNode := range hostNodeList.Items {
		if _, ok := virtResourceMap[hNode.Name]; ok {
			hostResourceMap[hNode.Name] = hNode.Status.Allocatable
		}
	}

	m := distributeQuotas(hostResourceMap, virtResourceMap, mergedResourceLists)
	allocatable = m[virtualNodeName]
}

@@ -125,76 +145,99 @@ func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceLis
	return merged
}

// distributeQuotas divides the total resource quotas evenly among all active virtual nodes.
// This ensures that each virtual node reports a fair share of the available resources,
// preventing the scheduler from overloading a single node.
// distributeQuotas divides the total resource quotas among all active virtual nodes,
// capped by each node's actual host capacity. This ensures that each virtual node
// reports a fair share of the available resources without exceeding what its
// underlying host node can provide.
//
// The algorithm iterates over each resource, divides it as evenly as possible among the
// sorted virtual nodes, and distributes any remainder to the first few nodes to ensure
// all resources are allocated. Sorting the nodes by name guarantees a deterministic
// distribution.
func distributeQuotas(ctx context.Context, logger logr.Logger, virtualClient client.Client, quotas corev1.ResourceList) (map[string]corev1.ResourceList, error) {
	// List all virtual nodes to distribute the quota stably.
	var virtualNodeList corev1.NodeList
	if err := virtualClient.List(ctx, &virtualNodeList); err != nil {
		logger.Error(err, "error listing virtual nodes for stable capacity distribution, falling back to full quota")
		return nil, err
	}

	// If there are no virtual nodes, there's nothing to distribute.
	numNodes := int64(len(virtualNodeList.Items))
	if numNodes == 0 {
		logger.Info("error listing virtual nodes for stable capacity distribution, falling back to full quota")
		return nil, nil
	}

	// Sort nodes by name for a deterministic distribution of resources.
	sort.Slice(virtualNodeList.Items, func(i, j int) bool {
		return virtualNodeList.Items[i].Name < virtualNodeList.Items[j].Name
	})

	// Initialize the resource map for each virtual node.
	resourceMap := make(map[string]corev1.ResourceList)
	for _, virtualNode := range virtualNodeList.Items {
		resourceMap[virtualNode.Name] = corev1.ResourceList{}
	}
// For each resource type the algorithm uses a multi-pass redistribution loop:
// 1. Divide the remaining quota evenly among eligible nodes (sorted by name for
//    determinism), assigning any integer remainder to the first nodes alphabetically.
// 2. Cap each node's share at its host allocatable capacity.
// 3. Remove nodes that have reached their host capacity.
// 4. If there is still unallocated quota (because some nodes were capped below their
//    even share), repeat from step 1 with the remaining quota and remaining nodes.
//
// The loop terminates when the quota is fully distributed or no eligible nodes remain.
func distributeQuotas(hostResourceMap, virtResourceMap map[string]corev1.ResourceList, quotas corev1.ResourceList) map[string]corev1.ResourceList {
	resourceMap := make(map[string]corev1.ResourceList, len(virtResourceMap))
	maps.Copy(resourceMap, virtResourceMap)

	// Distribute each resource type from the policy's hard quota
	for resourceName, totalQuantity := range quotas {
		// Use MilliValue for precise division, especially for resources like CPU,
		// which are often expressed in milli-units. Otherwise, use the standard Value().
		var totalValue int64
		if _, found := milliScaleResources[resourceName]; found {
			totalValue = totalQuantity.MilliValue()
		} else {
			totalValue = totalQuantity.Value()
		_, useMilli := milliScaleResources[resourceName]

		// eligible nodes for each distribution cycle
		var eligibleNodes []string

		hostCap := make(map[string]int64)

		// Populate the host nodes capacity map and the initial effective nodes
		for vn := range virtResourceMap {
			hostNodeResources := hostResourceMap[vn]
			if hostNodeResources == nil {
				continue
			}

			resourceQuantity, found := hostNodeResources[resourceName]
			if !found {
				// skip the node if the resource does not exist on the host node
				continue
			}

			hostCap[vn] = resourceQuantity.Value()
			if useMilli {
				hostCap[vn] = resourceQuantity.MilliValue()
			}

			eligibleNodes = append(eligibleNodes, vn)
		}

		// Calculate the base quantity of the resource to be allocated per node.
		// and the remainder that needs to be distributed among the nodes.
		//
		// For example, if totalValue is 2000 (e.g., 2 CPU) and there are 3 nodes:
		// - quantityPerNode would be 666 (2000 / 3)
		// - remainder would be 2 (2000 % 3)
		// The first two nodes would get 667 (666 + 1), and the last one would get 666.
		quantityPerNode := totalValue / numNodes
		remainder := totalValue % numNodes
		sort.Strings(eligibleNodes)

		// Iterate through the sorted virtual nodes to distribute the resource.
		for _, virtualNode := range virtualNodeList.Items {
			nodeQuantity := quantityPerNode
			if remainder > 0 {
				nodeQuantity++
				remainder--
		totalValue := totalQuantity.Value()
		if useMilli {
			totalValue = totalQuantity.MilliValue()
		}

		// Start of the distribution cycle, each cycle will distribute the quota resource
		// evenly between nodes, each node can not exceed the corresponding host node capacity
		for totalValue > 0 && len(eligibleNodes) > 0 {
			nodeNum := int64(len(eligibleNodes))
			quantityPerNode := totalValue / nodeNum
			remainder := totalValue % nodeNum

			remainingNodes := []string{}

			for _, virtualNodeName := range eligibleNodes {
				nodeQuantity := quantityPerNode
				if remainder > 0 {
					nodeQuantity++
					remainder--
				}
				// We cap the quantity to the hostNode capacity
				nodeQuantity = min(nodeQuantity, hostCap[virtualNodeName])

				if nodeQuantity > 0 {
					existing := resourceMap[virtualNodeName][resourceName]
					if useMilli {
						resourceMap[virtualNodeName][resourceName] = *resource.NewMilliQuantity(existing.MilliValue()+nodeQuantity, totalQuantity.Format)
					} else {
						resourceMap[virtualNodeName][resourceName] = *resource.NewQuantity(existing.Value()+nodeQuantity, totalQuantity.Format)
					}
				}

				totalValue -= nodeQuantity
				hostCap[virtualNodeName] -= nodeQuantity

				if hostCap[virtualNodeName] > 0 {
					remainingNodes = append(remainingNodes, virtualNodeName)
				}
			}

			if _, found := milliScaleResources[resourceName]; found {
				resourceMap[virtualNode.Name][resourceName] = *resource.NewMilliQuantity(nodeQuantity, totalQuantity.Format)
			} else {
				resourceMap[virtualNode.Name][resourceName] = *resource.NewQuantity(nodeQuantity, totalQuantity.Format)
			}
			eligibleNodes = remainingNodes
		}
	}

	return resourceMap, nil
	return resourceMap
}

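Because the interleaved old and new lines above are hard to read as a diff, here is a compact, standalone sketch of the same capped fair-share loop, using plain integers instead of resource.Quantity (so the milli-scaling step is left out); the helper name and the numbers are made up purely for illustration:

```go
package main

import (
	"fmt"
	"sort"
)

// distributeCapped spreads "total" units of one resource across nodes, capping
// each node at its host capacity and redistributing any leftover in further
// passes, mirroring the multi-pass loop described in the comments above.
func distributeCapped(total int64, hostCap map[string]int64) map[string]int64 {
	out := make(map[string]int64, len(hostCap))

	// Nodes that still have spare host capacity, sorted for deterministic
	// remainder assignment.
	eligible := make([]string, 0, len(hostCap))
	remainingCap := make(map[string]int64, len(hostCap))
	for name, c := range hostCap {
		eligible = append(eligible, name)
		remainingCap[name] = c
	}
	sort.Strings(eligible)

	for total > 0 && len(eligible) > 0 {
		perNode := total / int64(len(eligible))
		remainder := total % int64(len(eligible))

		var next []string
		for _, name := range eligible {
			share := perNode
			if remainder > 0 {
				share++
				remainder--
			}
			share = min(share, remainingCap[name]) // cap at host capacity

			out[name] += share
			total -= share
			remainingCap[name] -= share

			if remainingCap[name] > 0 {
				next = append(next, name)
			}
		}

		eligible = next
	}

	return out
}

func main() {
	// Example: a 6 CPU quota over two nodes with 8 and 2 CPUs of host capacity.
	// Pass 1: 3 each, but node-2 is capped at 2, leaving 1 unallocated.
	// Pass 2: the leftover 1 goes to node-1, giving 4 / 2.
	fmt.Println(distributeCapped(6, map[string]int64{"node-1": 8, "node-2": 2}))
}
```

The real function applies this loop once per resource name, and resources listed in milliScaleResources are divided via MilliValue(), which is why a 2 CPU quota over three nodes splits as 667m/667m/666m in the tests below. Because the reworked function is pure and takes plain maps, the rewritten test that follows no longer needs a fake client, logger, or context, and can be run with go test -run Test_distributeQuotas ./... from the repository root.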
@@ -1,19 +1,13 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func Test_distributeQuotas(t *testing.T) {
|
||||
@@ -21,39 +15,56 @@ func Test_distributeQuotas(t *testing.T) {
|
||||
err := corev1.AddToScheme(scheme)
|
||||
assert.NoError(t, err)
|
||||
|
||||
node1 := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
|
||||
node2 := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2"}}
|
||||
node3 := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-3"}}
|
||||
// Large allocatable so capping doesn't interfere with basic distribution tests.
|
||||
largeAllocatable := corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
corev1.ResourceMemory: resource.MustParse("100Gi"),
|
||||
corev1.ResourcePods: resource.MustParse("1000"),
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
virtualNodes []client.Object
|
||||
quotas corev1.ResourceList
|
||||
want map[string]corev1.ResourceList
|
||||
wantErr bool
|
||||
name string
|
||||
virtResourceMap map[string]corev1.ResourceList
|
||||
hostResourceMap map[string]corev1.ResourceList
|
||||
quotas corev1.ResourceList
|
||||
want map[string]corev1.ResourceList
|
||||
}{
|
||||
{
|
||||
name: "no virtual nodes",
|
||||
virtualNodes: []client.Object{},
|
||||
name: "no virtual nodes",
|
||||
virtResourceMap: map[string]corev1.ResourceList{},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{},
|
||||
wantErr: false,
|
||||
want: map[string]corev1.ResourceList{},
|
||||
},
|
||||
{
|
||||
name: "no quotas",
|
||||
virtualNodes: []client.Object{node1, node2},
|
||||
quotas: corev1.ResourceList{},
|
||||
name: "no quotas",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "even distribution of cpu and memory",
|
||||
virtualNodes: []client.Object{node1, node2},
|
||||
name: "fewer virtual nodes than host nodes",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
"node-3": largeAllocatable,
|
||||
"node-4": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
@@ -68,65 +79,203 @@ func Test_distributeQuotas(t *testing.T) {
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "uneven distribution with remainder",
|
||||
virtualNodes: []client.Object{node1, node2, node3},
|
||||
name: "even distribution of cpu and memory",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"), // 2000m / 3 = 666m with 2m remainder
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "uneven distribution with remainder",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
"node-3": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {corev1.ResourceCPU: resource.MustParse("667m")},
|
||||
"node-2": {corev1.ResourceCPU: resource.MustParse("667m")},
|
||||
"node-3": {corev1.ResourceCPU: resource.MustParse("666m")},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "distribution of number resources",
|
||||
virtualNodes: []client.Object{node1, node2, node3},
|
||||
name: "distribution of number resources",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
"node-3": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourcePods: resource.MustParse("11"),
|
||||
corev1.ResourceSecrets: resource.MustParse("9"),
|
||||
"custom": resource.MustParse("8"),
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourcePods: resource.MustParse("11"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("667m"),
|
||||
corev1.ResourcePods: resource.MustParse("4"),
|
||||
corev1.ResourceSecrets: resource.MustParse("3"),
|
||||
"custom": resource.MustParse("3"),
|
||||
corev1.ResourceCPU: resource.MustParse("667m"),
|
||||
corev1.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("667m"),
|
||||
corev1.ResourcePods: resource.MustParse("4"),
|
||||
corev1.ResourceSecrets: resource.MustParse("3"),
|
||||
"custom": resource.MustParse("3"),
|
||||
corev1.ResourceCPU: resource.MustParse("667m"),
|
||||
corev1.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
"node-3": {
|
||||
corev1.ResourceCPU: resource.MustParse("666m"),
|
||||
corev1.ResourcePods: resource.MustParse("3"),
|
||||
corev1.ResourceSecrets: resource.MustParse("3"),
|
||||
"custom": resource.MustParse("2"),
|
||||
corev1.ResourceCPU: resource.MustParse("666m"),
|
||||
corev1.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "extended resource distributed only to nodes that have it",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
},
|
||||
"node-3": {
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
"nvidia.com/gpu": resource.MustParse("4"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("3"),
|
||||
"nvidia.com/gpu": resource.MustParse("4"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
},
|
||||
"node-3": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "capping at host capacity with redistribution",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("8"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("6"),
|
||||
},
|
||||
// Even split would be 3 each, but node-2 only has 2 CPU.
|
||||
// node-2 gets capped at 2, the remaining 1 goes to node-1.
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {corev1.ResourceCPU: resource.MustParse("4")},
|
||||
"node-2": {corev1.ResourceCPU: resource.MustParse("2")},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gpu capping with uneven host capacity",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
"nvidia.com/gpu": resource.MustParse("6"),
|
||||
},
|
||||
"node-2": {
|
||||
"nvidia.com/gpu": resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
"nvidia.com/gpu": resource.MustParse("4"),
|
||||
},
|
||||
// Even split would be 2 each, but node-2 only has 1 GPU.
|
||||
// node-2 gets capped at 1, the remaining 1 goes to node-1.
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {"nvidia.com/gpu": resource.MustParse("3")},
|
||||
"node-2": {"nvidia.com/gpu": resource.MustParse("1")},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "quota exceeds total host capacity",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
"node-2": {
|
||||
"nvidia.com/gpu": resource.MustParse("1"),
|
||||
},
|
||||
"node-3": {
|
||||
"nvidia.com/gpu": resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
"nvidia.com/gpu": resource.MustParse("10"),
|
||||
},
|
||||
// Total host capacity is 4, quota is 10. Each node gets its full capacity.
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {"nvidia.com/gpu": resource.MustParse("2")},
|
||||
"node-2": {"nvidia.com/gpu": resource.MustParse("1")},
|
||||
"node-3": {"nvidia.com/gpu": resource.MustParse("1")},
|
||||
},
|
||||
},
|
||||
}
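	// The cases above encode the expected split arithmetic: a quota of 2 CPU over
	// three eligible nodes yields 667m/667m/666m (the leftover milli-units go to the
	// first nodes), extended resources such as nvidia.com/gpu are only handed to
	// nodes whose host actually reports them, each node's share is capped at its
	// host capacity with the remainder redistributed to the other nodes, and when
	// the quota exceeds the total host capacity every node receives its full capacity.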

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.virtualNodes...).Build()
			logger := zapr.NewLogger(zap.NewNop())

			got, gotErr := distributeQuotas(context.Background(), logger, fakeClient, tt.quotas)
			if tt.wantErr {
				assert.Error(t, gotErr)
			} else {
				assert.NoError(t, gotErr)
			}
			got := distributeQuotas(tt.hostResourceMap, tt.virtResourceMap, tt.quotas)

			assert.Equal(t, len(tt.want), len(got), "Number of nodes in result should match")
@@ -401,28 +401,45 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
	// Schedule the host pod in the same host node of the virtual kubelet
	hostPod.Spec.NodeName = p.agentHostname

	// The pod's own nodeSelector is ignored.
	// The final selector is determined by the cluster spec, but overridden by a policy if present.
	hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
	if cluster.Status.Policy != nil && len(cluster.Status.Policy.NodeSelector) > 0 {
		hostPod.Spec.NodeSelector = cluster.Status.Policy.NodeSelector
	}
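	// Net precedence for the host pod's node selector: a selector recorded in
	// Status.Policy wins over the Cluster spec selector, and whatever selector the
	// virtual pod itself carried is discarded.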

	// setting the hostname for the pod if it's not set
	if virtualPod.Spec.Hostname == "" {
		hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
	}

	// if the priorityClass for the virtual cluster is set then override the provided value
	// When a PriorityClass is set we will use the translated one in the HostCluster.
	// If the Cluster or a Policy defines a PriorityClass of the host we are going to use that one.
	// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
	// 'system-cluster-critical' and 'system-node-critical' default priority classes.
	if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
		if hostPod.Spec.PriorityClassName != "" {
			tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
			hostPod.Spec.PriorityClassName = tPriorityClassName
	//
	// TODO: we probably need to define a custom "intermediate" k3k-system-* priority
	if strings.HasPrefix(virtualPod.Spec.PriorityClassName, "system-") {
		hostPod.Spec.PriorityClassName = virtualPod.Spec.PriorityClassName
	} else {
		enforcedPriorityClassName := cluster.Spec.PriorityClass
		if cluster.Status.Policy != nil && cluster.Status.Policy.PriorityClass != nil {
			enforcedPriorityClassName = *cluster.Status.Policy.PriorityClass
		}

		if cluster.Spec.PriorityClass != "" {
			hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
		if enforcedPriorityClassName != "" {
			hostPod.Spec.PriorityClassName = enforcedPriorityClassName
		} else if virtualPod.Spec.PriorityClassName != "" {
			hostPod.Spec.PriorityClassName = p.Translator.TranslateName("", virtualPod.Spec.PriorityClassName)
			hostPod.Spec.Priority = nil
		}
	}

	// if the priority class is set we need to remove the priority
	if hostPod.Spec.PriorityClassName != "" {
		hostPod.Spec.Priority = nil
	}

	p.configurePodEnvs(hostPod, &virtualPod)

	// fieldpath annotations
@@ -538,6 +538,12 @@ type ClusterStatus struct {
	// +optional
	PolicyName string `json:"policyName,omitempty"`

	// policy represents the status of the policy applied to this cluster.
	// This field is set by the VirtualClusterPolicy controller.
	//
	// +optional
	Policy *AppliedPolicy `json:"policy,omitempty"`

	// KubeletPort specifies the port used by k3k-kubelet in shared mode.
	//
	// +optional
@@ -561,6 +567,25 @@ type ClusterStatus struct {
	Phase ClusterPhase `json:"phase,omitempty"`
}

// AppliedPolicy defines the observed state of an applied policy.
type AppliedPolicy struct {
	// name is the name of the VirtualClusterPolicy currently applied to this cluster.
	//
	// +kubebuilder:validation:MinLength:=1
	// +required
	Name string `json:"name,omitempty"`

	// priorityClass is the priority class enforced by the active VirtualClusterPolicy.
	//
	// +optional
	PriorityClass *string `json:"priorityClass,omitempty"`

	// nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
	//
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
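// For illustration only: a Cluster bound to a policy would then surface something
// like the following in its status (field names per the structs above; the policy
// name, class, and selector values here are hypothetical):
//
//	status:
//	  policyName: my-policy
//	  policy:
//	    name: my-policy
//	    priorityClass: vcp-default
//	    nodeSelector:
//	      workload-tier: shared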

// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
type ClusterPhase string
@@ -25,6 +25,33 @@ func (in *Addon) DeepCopy() *Addon {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
	*out = *in
	if in.PriorityClass != nil {
		in, out := &in.PriorityClass, &out.PriorityClass
		*out = new(string)
		**out = **in
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
func (in *AppliedPolicy) DeepCopy() *AppliedPolicy {
	if in == nil {
		return nil
	}
	out := new(AppliedPolicy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
	*out = *in
@@ -200,6 +227,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Policy != nil {
		in, out := &in.Policy, &out.Policy
		*out = new(AppliedPolicy)
		(*in).DeepCopyInto(*out)
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
@@ -2,13 +2,17 @@ package policy

import (
	"context"
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -52,15 +56,36 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
	}

	for _, ns := range namespaces.Items {
		selector := labels.NewSelector()
		currentPolicyName := ns.Labels[PolicyNameLabelKey]

		if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil {
			selector = selector.Add(*req)
		}
		// This will match all the resources managed by the K3k Policy controller
		// that have the app.kubernetes.io/managed-by=k3k-policy-controller label
		selector := labels.SelectorFromSet(labels.Set{
			ManagedByLabelKey: VirtualPolicyControllerName,
		})

		// if the namespace is bound to a policy -> cleanup resources of other policies
		if ns.Labels[PolicyNameLabelKey] != "" {
			requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})
		// If the namespace is not bound to any policy, or if the policy it was bound to no longer exists,
		// we need to clear policy-related fields on its Cluster objects.
		if currentPolicyName == "" {
			if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
				log.Error(err, "error clearing policy fields for clusters in unbound namespace", "namespace", ns.Name)
			}
		} else {
			var policy v1beta1.VirtualClusterPolicy
			if err := c.Client.Get(ctx, types.NamespacedName{Name: currentPolicyName}, &policy); err != nil {
				if apierrors.IsNotFound(err) {
					if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
						log.Error(err, "error clearing policy fields for clusters in namespace with non-existent policy", "namespace", ns.Name, "policy", currentPolicyName)
					}
				} else {
					log.Error(err, "error getting policy for namespace", "namespace", ns.Name, "policy", currentPolicyName)
				}
			}

		// if the namespace is bound to a policy -> cleanup resources of other policies
		requirement, err := labels.NewRequirement(
			PolicyNameLabelKey, selection.NotEquals, []string{currentPolicyName},
		)

		// log the error but continue cleaning up the other namespaces
		if err != nil {
@@ -90,3 +115,30 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)

	return nil
}

// clearPolicyFieldsForClustersInNamespace sets the policy status on Cluster objects in the given namespace to nil.
func (c *VirtualClusterPolicyReconciler) clearPolicyFieldsForClustersInNamespace(ctx context.Context, namespace string) error {
	log := ctrl.LoggerFrom(ctx)

	var clusters v1beta1.ClusterList
	if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace)); err != nil {
		return fmt.Errorf("failed listing clusters in namespace %s: %w", namespace, err)
	}

	var updateErrs []error

	for i := range clusters.Items {
		cluster := clusters.Items[i]
		if cluster.Status.Policy != nil {
			log.V(1).Info("Clearing policy status for Cluster", "cluster", cluster.Name, "namespace", namespace)
			cluster.Status.Policy = nil

			if updateErr := c.Client.Status().Update(ctx, &cluster); updateErr != nil {
				updateErr = fmt.Errorf("failed updating Status for Cluster %s: %w", cluster.Name, updateErr)
				updateErrs = append(updateErrs, updateErr)
			}
		}
	}
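	// Join the per-cluster update failures into a single error, so one failed
	// status update does not hide the others and does not stop the remaining
	// Clusters in the namespace from being processed.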
	return errors.Join(updateErrs...)
}
@@ -470,16 +470,21 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
	var clusterUpdateErrs []error

	for _, cluster := range clusters.Items {
		orig := cluster.DeepCopy()
		origStatus := cluster.Status.DeepCopy()

		cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
		cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
		cluster.Status.Policy = &v1beta1.AppliedPolicy{
			Name: policy.Name,
			PriorityClass: &policy.Spec.DefaultPriorityClass,
			NodeSelector: policy.Spec.DefaultNodeSelector,
		}
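		// Only the status is reconciled here: the Cluster spec is left exactly as the
		// user set it, and an update is issued below only when the observed policy changed.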

		if !reflect.DeepEqual(orig, cluster) {
		if !reflect.DeepEqual(origStatus, &cluster.Status) {
			log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)

			// continue updating also the other clusters even if an error occurred
			clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
			if err := c.Client.Status().Update(ctx, &cluster); err != nil {
				clusterUpdateErrs = append(clusterUpdateErrs, err)
			}
		}
	}
@@ -2,7 +2,6 @@ package policy_test

import (
	"context"
	"reflect"
	"time"

	"k8s.io/apimachinery/pkg/api/resource"
@@ -307,7 +306,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
})

It("should update Cluster's PriorityClass", func() {
It("updates the Cluster's policy status with the DefaultPriorityClass", func() {
	policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
		DefaultPriorityClass: "foobar",
	})
@@ -329,19 +328,22 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	err := k8sClient.Create(ctx, cluster)
	Expect(err).To(Not(HaveOccurred()))

	// wait a bit
	Eventually(func() bool {
	Eventually(func(g Gomega) {
		key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
		err = k8sClient.Get(ctx, key, cluster)
		Expect(err).To(Not(HaveOccurred()))
		return cluster.Spec.PriorityClass == policy.Spec.DefaultPriorityClass
		g.Expect(err).To(Not(HaveOccurred()))

		g.Expect(cluster.Spec.PriorityClass).To(BeEmpty())
		g.Expect(cluster.Status.Policy).To(Not(BeNil()))
		g.Expect(cluster.Status.Policy.PriorityClass).To(Not(BeNil()))
		g.Expect(*cluster.Status.Policy.PriorityClass).To(Equal(policy.Spec.DefaultPriorityClass))
	}).
		WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
		Should(Succeed())
})

It("should update Cluster's NodeSelector", func() {
It("updates the Cluster's policy status with the DefaultNodeSelector", func() {
	policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
		DefaultNodeSelector: map[string]string{"label-1": "value-1"},
	})
@@ -366,18 +368,21 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Expect(err).To(Not(HaveOccurred()))

	// wait a bit
	Eventually(func() bool {
	Eventually(func(g Gomega) {
		key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
		err = k8sClient.Get(ctx, key, cluster)
		Expect(err).To(Not(HaveOccurred()))
		return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)

		g.Expect(cluster.Spec.NodeSelector).To(BeEmpty())
		g.Expect(cluster.Status.Policy).To(Not(BeNil()))
		g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
	}).
		WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
		Should(Succeed())
})

It("should update the nodeSelector if changed", func() {
It("updates the Cluster's policy status when the VCP nodeSelector changes", func() {
	policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
		DefaultNodeSelector: map[string]string{"label-1": "value-1"},
	})
@@ -399,43 +404,56 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	err := k8sClient.Create(ctx, cluster)
	Expect(err).To(Not(HaveOccurred()))

	Expect(cluster.Spec.NodeSelector).To(Equal(policy.Spec.DefaultNodeSelector))
	// Cluster Spec should not change, VCP NodeSelector should be present in the Status
	Eventually(func(g Gomega) {
		key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
		err = k8sClient.Get(ctx, key, cluster)
		Expect(err).To(Not(HaveOccurred()))

		g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
		g.Expect(cluster.Status.Policy).To(Not(BeNil()))
		g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
	}).
		WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(Succeed())

	// update the VirtualClusterPolicy
	policy.Spec.DefaultNodeSelector["label-2"] = "value-2"
	err = k8sClient.Update(ctx, policy)
	Expect(err).To(Not(HaveOccurred()))
	Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

	// wait a bit
	Eventually(func() bool {
	Eventually(func(g Gomega) {
		key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
		err = k8sClient.Get(ctx, key, cluster)
		Expect(err).To(Not(HaveOccurred()))
		return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)

		g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
		g.Expect(cluster.Status.Policy).To(Not(BeNil()))
		g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-2": "value-2"}))
	}).
		WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
		Should(Succeed())

	// Update the Cluster
	err = k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
	Expect(err).To(Not(HaveOccurred()))
	cluster.Spec.NodeSelector["label-3"] = "value-3"
	err = k8sClient.Update(ctx, cluster)
	Expect(err).To(Not(HaveOccurred()))
	Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

	// wait a bit and check it's restored
	Eventually(func() bool {
	var updatedCluster v1beta1.Cluster

	Consistently(func(g Gomega) {
		key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
		err = k8sClient.Get(ctx, key, &updatedCluster)
		Expect(err).To(Not(HaveOccurred()))
		return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
		err = k8sClient.Get(ctx, key, cluster)
		g.Expect(err).To(Not(HaveOccurred()))
		g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-3": "value-3"}))
	}).
		WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
		Should(Succeed())
})

It("should create a ResourceQuota if Quota is enabled", func() {
@@ -8,6 +8,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	corev1 "k8s.io/api/core/v1"
	schedv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -99,6 +100,100 @@ var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusT
	WithPolling(time.Second * 5).
	Should(Succeed())
})
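// The case below pins spec.priorityClass to a user-created PriorityClass and then
// points the suite's VirtualClusterPolicy (vcp) at a different DefaultPriorityClass:
// the spec value must stay untouched while status.policy reports the enforced class.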
It("created with field controlled from a policy", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
priorityClass := &schedv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pc-",
|
||||
},
|
||||
Value: 100,
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, priorityClass)).To(Succeed())
|
||||
|
||||
clusterObj := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "status-cluster-",
|
||||
Namespace: namespace.Name,
|
||||
},
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
PriorityClass: priorityClass.Name,
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())
|
||||
|
||||
DeferCleanup(func() {
|
||||
Expect(k8sClient.Delete(ctx, priorityClass)).To(Succeed())
|
||||
})
|
||||
|
||||
clusterKey := client.ObjectKeyFromObject(clusterObj)
|
||||
|
||||
// Check for the initial status to be set
|
||||
Eventually(func(g Gomega) {
|
||||
err := k8sClient.Get(ctx, clusterKey, clusterObj)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))
|
||||
|
||||
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
|
||||
g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Second * 20).
|
||||
Should(Succeed())
|
||||
|
||||
// Check for the status to be updated to Ready
|
||||
Eventually(func(g Gomega) {
|
||||
err := k8sClient.Get(ctx, clusterKey, clusterObj)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
|
||||
g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
|
||||
g.Expect(clusterObj.Status.Policy.Name).To(Equal(vcp.Name))
|
||||
|
||||
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
|
||||
g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned))
|
||||
}).
|
||||
WithTimeout(time.Minute * 3).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(Succeed())
|
||||
|
||||
// update policy
|
||||
|
||||
priorityClassVCP := &schedv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pc-",
|
||||
},
|
||||
Value: 100,
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, priorityClassVCP)).To(Succeed())
|
||||
|
||||
DeferCleanup(func() {
|
||||
Expect(k8sClient.Delete(ctx, priorityClassVCP)).To(Succeed())
|
||||
})
|
||||
|
||||
vcp.Spec.DefaultPriorityClass = priorityClassVCP.Name
|
||||
Expect(k8sClient.Update(ctx, vcp)).To(Succeed())
|
||||
|
||||
// Check for the status to be updated to Ready
|
||||
Eventually(func(g Gomega) {
|
||||
err := k8sClient.Get(ctx, clusterKey, clusterObj)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
|
||||
g.Expect(clusterObj.Status.Policy.PriorityClass).To(Not(BeNil()))
|
||||
g.Expect(*clusterObj.Status.Policy.PriorityClass).To(Equal(priorityClassVCP.Name))
|
||||
g.Expect(clusterObj.Spec.PriorityClass).To(Equal(priorityClass.Name))
|
||||
}).
|
||||
WithTimeout(time.Minute * 3).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
Context("and the cluster has validation errors", func() {
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

@@ -77,7 +78,6 @@ var (
	k8s *kubernetes.Clientset
	k8sClient client.Client
	kubeconfigPath string
	repo string
	helmActionConfig *action.Configuration
)

@@ -86,17 +86,17 @@ var _ = BeforeSuite(func() {

	GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))

	repo = os.Getenv("REPO")
	if repo == "" {
		repo = "rancher"
	}

	_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")

	if dockerInstallEnabled {
		installK3SDocker(ctx)
		repo := os.Getenv("REPO")
		if repo == "" {
			repo = "rancher"
		}

		installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
		initKubernetesClient(ctx)
		installK3kChart()
		installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
	} else {
		initKubernetesClient(ctx)
	}
@@ -110,6 +110,11 @@ func initKubernetesClient(ctx context.Context) {
		kubeconfig []byte
	)

	logger, err := zap.NewDevelopment()
	Expect(err).NotTo(HaveOccurred())

	log.SetLogger(zapr.NewLogger(logger))

	kubeconfigPath := os.Getenv("KUBECONFIG")
	Expect(kubeconfigPath).To(Not(BeEmpty()))

@@ -128,21 +133,12 @@ func initKubernetesClient(ctx context.Context) {
	scheme := buildScheme()
	k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
	Expect(err).NotTo(HaveOccurred())

	logger, err := zap.NewDevelopment()
	Expect(err).NotTo(HaveOccurred())

	log.SetLogger(zapr.NewLogger(logger))
}

func buildScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()

	err := v1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = appsv1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = networkingv1.AddToScheme(scheme)
	err := clientgoscheme.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = v1beta1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
@@ -150,7 +146,7 @@ func buildScheme() *runtime.Scheme {
	return scheme
}

func installK3SDocker(ctx context.Context) {
func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
	var (
		err error
		kubeconfig []byte
@@ -182,16 +178,15 @@ func installK3SDocker(ctx context.Context) {
	Expect(tmpFile.Close()).To(Succeed())
	kubeconfigPath = tmpFile.Name()

	err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
	err = k3sContainer.LoadImages(ctx, controllerImage+":dev", kubeletImage+":dev")
	Expect(err).To(Not(HaveOccurred()))
	DeferCleanup(os.Remove, kubeconfigPath)

	Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
	GinkgoWriter.Print(kubeconfigPath)
	GinkgoWriter.Print(string(kubeconfig))
	GinkgoWriter.Printf("KUBECONFIG set to: %s\n", kubeconfigPath)
}

func installK3kChart() {
func installK3kChart(controllerImage, kubeletImage string) {
	pwd, err := os.Getwd()
	Expect(err).To(Not(HaveOccurred()))

@@ -207,7 +202,7 @@ func installK3kChart() {
	Expect(err).To(Not(HaveOccurred()))

	err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
		GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
		GinkgoWriter.Printf("[Helm] "+format+"\n", v...)
	})
	Expect(err).To(Not(HaveOccurred()))

@@ -219,9 +214,17 @@ func installK3kChart() {
	iCli.Wait = true

	controllerMap, _ := k3kChart.Values["controller"].(map[string]any)

	extraEnvArray, _ := controllerMap["extraEnv"].([]map[string]any)
	extraEnvArray = append(extraEnvArray, map[string]any{
		"name": "DEBUG",
		"value": "true",
	})
	controllerMap["extraEnv"] = extraEnvArray

	imageMap, _ := controllerMap["image"].(map[string]any)
	maps.Copy(imageMap, map[string]any{
		"repository": repo + "/k3k",
		"repository": controllerImage,
		"tag": "dev",
		"pullPolicy": "IfNotPresent",
	})
@@ -230,14 +233,14 @@ func installK3kChart() {
	sharedAgentMap, _ := agentMap["shared"].(map[string]any)
	sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
	maps.Copy(sharedAgentImageMap, map[string]any{
		"repository": repo + "/k3k-kubelet",
		"repository": kubeletImage,
		"tag": "dev",
	})

	release, err := iCli.Run(k3kChart, k3kChart.Values)
	Expect(err).To(Not(HaveOccurred()))

	GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
	GinkgoWriter.Printf("Helm release '%s' installed in '%s' namespace\n", release.Name, release.Namespace)
}

func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
@@ -300,36 +303,28 @@ func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
	_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
	Expect(err).To(Not(HaveOccurred()))
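	// The readiness wait below is Gomega-based rather than a bool poll: the Deployment's
	// observed generation must match its generation and its Available condition must be
	// True, and MustPassRepeatedly(5) requires that to hold on five consecutive polls.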
	Eventually(func() bool {
	Eventually(func(g Gomega) {
		GinkgoWriter.Println("Checking K3k deployment status")

		dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
		Expect(err).To(Not(HaveOccurred()))
		g.Expect(err).To(Not(HaveOccurred()))
		g.Expect(dep.Generation).To(Equal(dep.Status.ObservedGeneration))

		// 1. Check if the controller has observed the latest generation
		if dep.Generation > dep.Status.ObservedGeneration {
			GinkgoWriter.Printf("K3k deployment generation: %d, observed generation: %d\n", dep.Generation, dep.Status.ObservedGeneration)
			return false
		var availableCond appsv1.DeploymentCondition

		for _, cond := range dep.Status.Conditions {
			if cond.Type == appsv1.DeploymentAvailable {
				availableCond = cond
				break
			}
		}

		// 2. Check if all replicas have been updated
		if dep.Spec.Replicas != nil && dep.Status.UpdatedReplicas < *dep.Spec.Replicas {
			GinkgoWriter.Printf("K3k deployment replicas: %d, updated replicas: %d\n", *dep.Spec.Replicas, dep.Status.UpdatedReplicas)
			return false
		}

		// 3. Check if all updated replicas are available
		if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
			GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
			return false
		}

		return true
		g.Expect(availableCond.Type).To(Equal(appsv1.DeploymentAvailable))
		g.Expect(availableCond.Status).To(Equal(v1.ConditionTrue))
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second).
		WithTimeout(time.Second * 30).
		Should(BeTrue())
		Should(Succeed())
}

var _ = AfterSuite(func() {