diff --git a/.gitignore b/.gitignore index e39eba7e..29ca9931 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ *.dylib bin dist/ +config/ # Test binary, build with `go test -c` *.test diff --git a/.golangci.yaml b/.golangci.yaml index 296d816a..6fac76e8 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -56,6 +56,10 @@ linters: - third_party$ - builtin$ - examples$ + rules: + - path: pkg/meta/ + linters: + - dupl formatters: enable: - gci diff --git a/.ko.yaml b/.ko.yaml index c5b78621..737e6e52 100644 --- a/.ko.yaml +++ b/.ko.yaml @@ -4,6 +4,6 @@ defaultPlatforms: - linux/arm builds: - id: capsule - main: ./ + main: ./cmd/ ldflags: - '{{ if index .Env "LD_FLAGS" }}{{ .Env.LD_FLAGS }}{{ end }}' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 79bc0ad1..437b9946 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,3 +39,8 @@ repos: entry: make golint language: system files: \.go$ + - id: go-test + name: Execute go test + entry: make test + language: system + files: \.go$ diff --git a/Dockerfile.tracing b/Dockerfile.tracing index e41b0371..92aa9a59 100644 --- a/Dockerfile.tracing +++ b/Dockerfile.tracing @@ -5,7 +5,7 @@ FROM ${TARGET_IMAGE} AS target # Inject Harpoon Image FROM ghcr.io/alegrey91/harpoon:latest WORKDIR / -COPY --from=target /ko-app/capsule ./manager +COPY --from=target /ko-app/cmd ./manager RUN chmod +x ./harpoon ENTRYPOINT ["/harpoon", \ "capture", \ diff --git a/Makefile b/Makefile index a7458d19..05f4006f 100644 --- a/Makefile +++ b/Makefile @@ -178,7 +178,7 @@ LD_FLAGS := "-X main.Version=$(VERSION) \ ko-build-capsule: ko @echo Building Capsule $(KO_TAGS) for $(KO_PLATFORM) >&2 @LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(CAPSULE_IMG) \ - $(KO) build ./ --bare --tags=$(KO_TAGS) --push=false --local --platform=$(KO_PLATFORM) + $(KO) build ./cmd/ --bare --tags=$(KO_TAGS) --push=false --local --platform=$(KO_PLATFORM) .PHONY: ko-build-all ko-build-all: ko-build-capsule @@ -204,7 +204,7 @@ ko-login: ko .PHONY: ko-publish-capsule ko-publish-capsule: ko-login ## Build and publish kyvernopre image (with ko) @LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(CAPSULE_IMG) \ - $(KO) build ./ --bare --tags=$(KO_TAGS) + $(KO) build ./cmd/ --bare --tags=$(KO_TAGS) .PHONY: ko-publish-all ko-publish-all: ko-publish-capsule diff --git a/PROJECT b/PROJECT index bd679d02..b8616a85 100644 --- a/PROJECT +++ b/PROJECT @@ -1,6 +1,10 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html domain: clastix.io layout: -- go.kubebuilder.io/v3 +- go.kubebuilder.io/v4 plugins: manifests.sdk.operatorframework.io/v2: {} scorecard.sdk.operatorframework.io/v2: {} @@ -44,4 +48,20 @@ resources: kind: GlobalTenantResource path: github.com/projectcapsule/capsule/api/v1beta2 version: v1beta2 +- api: + crdVersion: v1 + domain: clastix.io + group: capsule + kind: ResourcePool + path: github.com/projectcapsule/capsule/api/v1beta2 + version: v1beta2 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: clastix.io + group: capsule + kind: ResourcePoolClaim + path: github.com/projectcapsule/capsule/api/v1beta2 + version: v1beta2 version: "3" diff --git a/api/v1beta2/resourcepool_func.go b/api/v1beta2/resourcepool_func.go new file mode 100644 index 00000000..e6a1d021 --- /dev/null +++ b/api/v1beta2/resourcepool_func.go @@ -0,0 +1,276 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package v1beta2 + +import ( + "errors" + "sort" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/projectcapsule/capsule/pkg/api" +) + +func (r *ResourcePool) AssignNamespaces(namespaces []corev1.Namespace) { + var l []string + + for _, ns := range namespaces { + if ns.Status.Phase == corev1.NamespaceActive && ns.DeletionTimestamp == nil { + l = append(l, ns.GetName()) + } + } + + sort.Strings(l) + + r.Status.NamespaceSize = uint(len(l)) + r.Status.Namespaces = l +} + +func (r *ResourcePool) AssignClaims() { + var size uint + + for _, claims := range r.Status.Claims { + for range claims { + size++ + } + } + + r.Status.ClaimSize = size +} + +func (r *ResourcePool) GetClaimFromStatus(cl *ResourcePoolClaim) *ResourcePoolClaimsItem { + ns := cl.Namespace + + claims := r.Status.Claims[ns] + if claims == nil { + return nil + } + + for _, claim := range claims { + if claim.UID == cl.UID { + return claim + } + } + + return nil +} + +func (r *ResourcePool) AddClaimToStatus(claim *ResourcePoolClaim) { + ns := claim.Namespace + + if r.Status.Claims == nil { + r.Status.Claims = ResourcePoolNamespaceClaimsStatus{} + } + + if r.Status.Allocation.Claimed == nil { + r.Status.Allocation.Claimed = corev1.ResourceList{} + } + + claims := r.Status.Claims[ns] + if claims == nil { + claims = ResourcePoolClaimsList{} + } + + scl := &ResourcePoolClaimsItem{ + StatusNameUID: api.StatusNameUID{ + UID: claim.UID, + Name: api.Name(claim.Name), + }, + Claims: claim.Spec.ResourceClaims, + } + + // Try to update existing entry if UID matches + exists := false + + for i, cl := range claims { + if cl.UID == claim.UID { + claims[i] = scl + + exists = true + + break + } + } + + if !exists { + claims = append(claims, scl) + } + + r.Status.Claims[ns] = claims + + r.CalculateClaimedResources() +} + +func (r *ResourcePool) RemoveClaimFromStatus(claim *ResourcePoolClaim) { + newClaims := ResourcePoolClaimsList{} + + claims, ok := r.Status.Claims[claim.Namespace] + if !ok { + return + } + + for _, cl := range claims { + if cl.UID != claim.UID { + newClaims = append(newClaims, cl) + } + } + + r.Status.Claims[claim.Namespace] = newClaims + + if len(newClaims) == 0 { + delete(r.Status.Claims, claim.Namespace) + } +} + +func (r *ResourcePool) CalculateClaimedResources() { + usage := corev1.ResourceList{} + + for res := range r.Status.Allocation.Hard { + usage[res] = resource.MustParse("0") + } + + for _, claims := range r.Status.Claims { + for _, claim := range claims { + for resourceName, 
qt := range claim.Claims { + amount, exists := usage[resourceName] + if !exists { + amount = resource.MustParse("0") + } + + amount.Add(qt) + usage[resourceName] = amount + } + } + } + + r.Status.Allocation.Claimed = usage + + r.CalculateAvailableResources() +} + +func (r *ResourcePool) CalculateAvailableResources() { + available := corev1.ResourceList{} + + for res, qt := range r.Status.Allocation.Hard { + amount, exists := r.Status.Allocation.Claimed[res] + if exists { + qt.Sub(amount) + } + + available[res] = qt + } + + r.Status.Allocation.Available = available +} + +func (r *ResourcePool) CanClaimFromPool(claim corev1.ResourceList) []error { + claimable := r.GetAvailableClaimableResources() + errs := []error{} + + for resourceName, req := range claim { + available, exists := claimable[resourceName] + if !exists || available.IsZero() || available.Cmp(req) < 0 { + errs = append(errs, errors.New("not enough resources "+string(resourceName)+" available")) + } + } + + return errs +} + +func (r *ResourcePool) GetAvailableClaimableResources() corev1.ResourceList { + hard := r.Status.Allocation.Hard.DeepCopy() + + for resourceName, qt := range hard { + claimed, exists := r.Status.Allocation.Claimed[resourceName] + if !exists { + claimed = resource.MustParse("0") + } + + qt.Sub(claimed) + + hard[resourceName] = qt + } + + return hard +} + +// GetResourceQuotaHardResources returns the hard specification for the ResourceQuota of a namespace. +// It takes the configured defaults into account; however, the defaults do not count towards the claim usage. +// This may change in the future: the defaults are not counted as usage because doing so could interrupt namespace management, +// as we would need to verify whether a new namespace with its defaults still fits into the pool. The same applies when attempting to join existing namespaces. +func (r *ResourcePool) GetResourceQuotaHardResources(namespace string) corev1.ResourceList { + _, claimed := r.GetNamespaceClaims(namespace) + + for resourceName, amount := range claimed { + if amount.IsZero() { + delete(claimed, resourceName) + } + } + + // Add the configured defaults on top of the claimed resources + for resourceName, amount := range r.Spec.Defaults { + usedValue := claimed[resourceName] + usedValue.Add(amount) + + claimed[resourceName] = usedValue + } + + return claimed +} + +// GetNamespaceClaims returns the claims and the total amount of claimed resources for a namespace. +func (r *ResourcePool) GetNamespaceClaims(namespace string) (claims map[string]*ResourcePoolClaimsItem, claimedResources corev1.ResourceList) { + claimedResources = corev1.ResourceList{} + claims = map[string]*ResourcePoolClaimsItem{} + + // Collect the claims recorded in the status for the given namespace + for ns, cl := range r.Status.Claims { + if ns != namespace { + continue + } + + for _, claim := range cl { + for resourceName, claimed := range claim.Claims { + usedValue, usedExists := claimedResources[resourceName] + if !usedExists { + usedValue = resource.MustParse("0") // Default to zero if no used value is found + } + + // Combine with claim + usedValue.Add(claimed) + claimedResources[resourceName] = usedValue + } + + claims[string(claim.UID)] = claim + } + } + + return +} + +// GetClaimedByNamespaceClaims calculates the claimed resources for each namespace. 
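Editorial note: the snippet below is a minimal, standalone sketch (not part of this diff) of the resource.Quantity arithmetic used by CalculateClaimedResources, CalculateAvailableResources, and CanClaimFromPool above; the hard and claimed values are invented purely for illustration.

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
    	hard := corev1.ResourceList{corev1.ResourceLimitsCPU: resource.MustParse("2")}
    	claimed := corev1.ResourceList{corev1.ResourceLimitsCPU: resource.MustParse("500m")}

    	// Available = Hard - Claimed, mirroring CalculateAvailableResources.
    	available := corev1.ResourceList{}
    	for name, qt := range hard {
    		if used, ok := claimed[name]; ok {
    			qt.Sub(used) // qt is a copy taken from the map, so mutating it is safe
    		}
    		available[name] = qt
    	}

    	// A request fits when available >= request; Cmp returns -1, 0, or 1.
    	request := resource.MustParse("1")
    	avail := available[corev1.ResourceLimitsCPU]
    	fmt.Printf("available=%s fits=%t\n", avail.String(), avail.Cmp(request) >= 0)
    }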
+func (r *ResourcePool) GetClaimedByNamespaceClaims() (claims map[string]corev1.ResourceList) { + claims = map[string]corev1.ResourceList{} + + // First, check if quota exists in the status + for ns, cl := range r.Status.Claims { + claims[ns] = corev1.ResourceList{} + nsScope := claims[ns] + + for _, claim := range cl { + for resourceName, claimed := range claim.Claims { + usedValue, usedExists := nsScope[resourceName] + if !usedExists { + usedValue = resource.MustParse("0") + } + + usedValue.Add(claimed) + nsScope[resourceName] = usedValue + } + } + } + + return +} diff --git a/api/v1beta2/resourcepool_func_test.go b/api/v1beta2/resourcepool_func_test.go new file mode 100644 index 00000000..6bee6995 --- /dev/null +++ b/api/v1beta2/resourcepool_func_test.go @@ -0,0 +1,292 @@ +package v1beta2 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/projectcapsule/capsule/pkg/api" + "github.com/projectcapsule/capsule/pkg/meta" + "github.com/stretchr/testify/assert" +) + +func TestGetClaimFromStatus(t *testing.T) { + ns := "test-namespace" + testUID := types.UID("test-uid") + otherUID := types.UID("wrong-uid") + + claim := &ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "claim-a", + Namespace: ns, + UID: testUID, + }, + } + + pool := &ResourcePool{ + Status: ResourcePoolStatus{ + Claims: ResourcePoolNamespaceClaimsStatus{ + ns: { + &ResourcePoolClaimsItem{ + StatusNameUID: api.StatusNameUID{ + UID: testUID, + }, + Claims: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + }, + }, + }, + }, + } + + t.Run("returns matching claim", func(t *testing.T) { + found := pool.GetClaimFromStatus(claim) + assert.NotNil(t, found) + assert.Equal(t, testUID, found.UID) + }) + + t.Run("returns nil if UID doesn't match", func(t *testing.T) { + claimWrongUID := *claim + claimWrongUID.UID = otherUID + + found := pool.GetClaimFromStatus(&claimWrongUID) + assert.Nil(t, found) + }) + + t.Run("returns nil if namespace has no claims", func(t *testing.T) { + claimWrongNS := *claim + claimWrongNS.Namespace = "other-ns" + + found := pool.GetClaimFromStatus(&claimWrongNS) + assert.Nil(t, found) + }) +} + +func makeResourceList(cpu, memory string) corev1.ResourceList { + return corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse(cpu), + corev1.ResourceLimitsMemory: resource.MustParse(memory), + } +} + +func makeClaim(name, ns string, uid types.UID, res corev1.ResourceList) *ResourcePoolClaim { + return &ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + UID: uid, + }, + Spec: ResourcePoolClaimSpec{ + ResourceClaims: res, + }, + } +} + +func TestAssignNamespaces(t *testing.T) { + pool := &ResourcePool{} + + namespaces := []corev1.Namespace{ + {ObjectMeta: metav1.ObjectMeta{Name: "active-ns"}, Status: corev1.NamespaceStatus{Phase: corev1.NamespaceActive}}, + {ObjectMeta: metav1.ObjectMeta{Name: "terminating-ns", DeletionTimestamp: &metav1.Time{}}, Status: corev1.NamespaceStatus{Phase: corev1.NamespaceTerminating}}, + } + + pool.AssignNamespaces(namespaces) + + assert.Equal(t, uint(1), pool.Status.NamespaceSize) + assert.Equal(t, []string{"active-ns"}, pool.Status.Namespaces) +} + +func TestAssignClaims(t *testing.T) { + pool := &ResourcePool{ + Status: ResourcePoolStatus{ + Claims: ResourcePoolNamespaceClaimsStatus{ + "ns": { + 
&ResourcePoolClaimsItem{}, + &ResourcePoolClaimsItem{}, + }, + }, + }, + } + pool.AssignClaims() + + assert.Equal(t, uint(2), pool.Status.ClaimSize) +} + +func TestAddRemoveClaimToStatus(t *testing.T) { + pool := &ResourcePool{} + + claim := makeClaim("claim-1", "ns", "uid-1", makeResourceList("1", "1Gi")) + pool.AddClaimToStatus(claim) + + stored := pool.GetClaimFromStatus(claim) + assert.NotNil(t, stored) + assert.Equal(t, api.Name("claim-1"), stored.Name) + + pool.RemoveClaimFromStatus(claim) + assert.Nil(t, pool.GetClaimFromStatus(claim)) +} + +func TestCalculateResources(t *testing.T) { + pool := &ResourcePool{ + Status: ResourcePoolStatus{ + Allocation: ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + }, + }, + Claims: ResourcePoolNamespaceClaimsStatus{ + "ns": { + &ResourcePoolClaimsItem{ + Claims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + } + + pool.CalculateClaimedResources() + + actualClaimed := pool.Status.Allocation.Claimed[corev1.ResourceLimitsCPU] + actualAvailable := pool.Status.Allocation.Available[corev1.ResourceLimitsCPU] + + assert.Equal(t, 0, (&actualClaimed).Cmp(resource.MustParse("1"))) + assert.Equal(t, 0, (&actualAvailable).Cmp(resource.MustParse("1"))) +} + +func TestCanClaimFromPool(t *testing.T) { + pool := &ResourcePool{ + Status: ResourcePoolStatus{ + Allocation: ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + }, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + }, + }, + } + + errs := pool.CanClaimFromPool(corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + }) + assert.Len(t, errs, 1) + + errs = pool.CanClaimFromPool(corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("500Mi"), + }) + assert.Len(t, errs, 0) +} + +func TestGetResourceQuotaHardResources(t *testing.T) { + pool := &ResourcePool{ + Spec: ResourcePoolSpec{ + Defaults: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + }, + }, + Status: ResourcePoolStatus{ + Claims: ResourcePoolNamespaceClaimsStatus{ + "ns": { + &ResourcePoolClaimsItem{ + Claims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + } + + res := pool.GetResourceQuotaHardResources("ns") + actual := res[corev1.ResourceLimitsCPU] + assert.Equal(t, 0, (&actual).Cmp(resource.MustParse("2"))) +} + +func TestGetNamespaceClaims(t *testing.T) { + pool := &ResourcePool{ + Status: ResourcePoolStatus{ + Claims: ResourcePoolNamespaceClaimsStatus{ + "ns": { + &ResourcePoolClaimsItem{ + StatusNameUID: api.StatusNameUID{UID: "uid1"}, + Claims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + } + + claims, res := pool.GetNamespaceClaims("ns") + assert.Contains(t, claims, "uid1") + actual := res[corev1.ResourceLimitsCPU] + assert.Equal(t, 0, (&actual).Cmp(resource.MustParse("1"))) +} + +func TestGetClaimedByNamespaceClaims(t *testing.T) { + pool := &ResourcePool{ + Status: ResourcePoolStatus{ + Claims: ResourcePoolNamespaceClaimsStatus{ + "ns1": { + &ResourcePoolClaimsItem{ + Claims: makeResourceList("1", "1Gi"), + }, + }, + }, + }, + } + + result := pool.GetClaimedByNamespaceClaims() + actualCPU := result["ns1"][corev1.ResourceLimitsCPU] + actualMem := result["ns1"][corev1.ResourceLimitsMemory] + + assert.Equal(t, 0, 
(&actualCPU).Cmp(resource.MustParse("1"))) + assert.Equal(t, 0, (&actualMem).Cmp(resource.MustParse("1Gi"))) +} + +func TestIsBoundToResourcePool_2(t *testing.T) { + t.Run("bound to resource pool (Assigned=True)", func(t *testing.T) { + claim := &ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{ + Condition: metav1.Condition{ + Type: meta.BoundCondition, + Status: metav1.ConditionTrue, + }, + }, + } + assert.Equal(t, true, claim.IsBoundToResourcePool()) + }) + + t.Run("not bound - wrong condition type", func(t *testing.T) { + claim := &ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{ + Condition: metav1.Condition{ + Type: "Other", + Status: metav1.ConditionTrue, + }, + }, + } + assert.Equal(t, false, claim.IsBoundToResourcePool()) + }) + + t.Run("not bound - condition not true", func(t *testing.T) { + claim := &ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{ + Condition: metav1.Condition{ + Type: meta.BoundCondition, + Status: metav1.ConditionFalse, + }, + }, + } + assert.Equal(t, false, claim.IsBoundToResourcePool()) + }) +} diff --git a/api/v1beta2/resourcepool_status.go b/api/v1beta2/resourcepool_status.go new file mode 100644 index 00000000..9a6763fd --- /dev/null +++ b/api/v1beta2/resourcepool_status.go @@ -0,0 +1,62 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/projectcapsule/capsule/pkg/api" +) + +// GlobalResourceQuotaStatus defines the observed state of GlobalResourceQuota. +type ResourcePoolStatus struct { + // How many namespaces are considered + // +kubebuilder:default=0 + NamespaceSize uint `json:"namespaceCount,omitempty"` + // Amount of claims + // +kubebuilder:default=0 + ClaimSize uint `json:"claimCount,omitempty"` + // Namespaces which are considered for claims + Namespaces []string `json:"namespaces,omitempty"` + // Tracks the quotas for the Resource. + Claims ResourcePoolNamespaceClaimsStatus `json:"claims,omitempty"` + // Tracks the Usage from Claimed against what has been granted from the pool + Allocation ResourcePoolQuotaStatus `json:"allocation,omitempty"` +} + +type ResourcePoolNamespaceClaimsStatus map[string]ResourcePoolClaimsList + +type ResourcePoolQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + Hard corev1.ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // Used is the current observed total usage of the resource in the namespace. + // +optional + Claimed corev1.ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` + // Used to track the usage of the resource in the pool (diff hard - claimed). May be used for further automation + // +optional + Available corev1.ResourceList `json:"available,omitempty" protobuf:"bytes,2,rep,name=available,casttype=ResourceList,castkey=ResourceName"` +} + +type ResourcePoolClaimsList []*ResourcePoolClaimsItem + +func (r *ResourcePoolClaimsList) GetClaimByUID(uid types.UID) *ResourcePoolClaimsItem { + for _, claim := range *r { + if claim.UID == uid { + return claim + } + } + + return nil +} + +// ResourceQuotaClaimStatus defines the observed state of ResourceQuotaClaim. 
+type ResourcePoolClaimsItem struct { + // Name and UID of the claim this entry tracks + api.StatusNameUID `json:",inline"` + // Claimed resources + Claims corev1.ResourceList `json:"claims,omitempty"` +} diff --git a/api/v1beta2/resourcepool_types.go b/api/v1beta2/resourcepool_types.go new file mode 100644 index 00000000..a3a11206 --- /dev/null +++ b/api/v1beta2/resourcepool_types.go @@ -0,0 +1,76 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/projectcapsule/capsule/pkg/api" +) + +// ResourcePoolSpec defines the desired state of a ResourcePool. +type ResourcePoolSpec struct { + // Selector to match the namespaces that should be managed by the ResourcePool + Selectors []api.NamespaceSelector `json:"selectors,omitempty"` + // Define the ResourceQuota served by this ResourcePool. + Quota corev1.ResourceQuotaSpec `json:"quota"` + // The defaults given to each namespace; the defaults are not counted towards the total allocation + // When you use claims, it's recommended to provision defaults, as they prevent the scheduling of any resources + Defaults corev1.ResourceList `json:"defaults,omitempty"` + // Additional Configuration + //+kubebuilder:default:={} + Config ResourcePoolSpecConfiguration `json:"config,omitempty"` +} + +type ResourcePoolSpecConfiguration struct { + // With this option, all allocatable resources are set to 0 in the ResourceQuota defaults. + // +kubebuilder:default=false + DefaultsAssignZero *bool `json:"defaultsZero,omitempty"` + // Claims are queued whenever they are allocated to a pool. A pool tries to allocate claims in order based on their + // creation date. Regardless of creation time, a claim requesting more resources than are available is put into the queue, + // while a lower-priority claim that still fits into the available resources is allowed to claim them, even though + // its priority is lower. + // Enabling this option enforces the order: the creation timestamp matters, and once a claim is queued no other claim + // with lower priority can claim the same resources. + // +kubebuilder:default=false + OrderedQueue *bool `json:"orderedQueue,omitempty"` + // When a ResourcePool is deleted, the ResourcePoolClaims bound to it are disassociated from the ResourcePool but not deleted. + // By enabling this option, the claims will be deleted when the ResourcePool is deleted, if they are in a bound state. + // +kubebuilder:default=false + DeleteBoundResources *bool `json:"deleteBoundResources,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=quotapool +// +kubebuilder:printcolumn:name="Claims",type="integer",JSONPath=".status.claimCount",description="The total amount of Claims bound" +// +kubebuilder:printcolumn:name="Namespaces",type="integer",JSONPath=".status.namespaceCount",description="The total amount of Namespaces considered" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age" + +// ResourcePool allows you to define a set of resources as known from ResourceQuotas. ResourcePools are defined at cluster scope and should +// be administrated by cluster administrators. However, they create an interface where cluster administrators can define +// from which namespaces the resources of a ResourcePool can be claimed.
The claiming is done via a namespaced CRD called ResourcePoolClaim. It is then +// up to the group of users within these namespaces to manage the resources they consume per namespace. Each ResourcePool provisions a ResourceQuota into all the selected namespaces. The ResourcePoolClaims, once they are assigned to the ResourcePool, then stack resources on top of that +// ResourceQuota in the namespace the ResourcePoolClaim was made from. +type ResourcePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ResourcePoolSpec `json:"spec,omitempty"` + Status ResourcePoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourcePoolList contains a list of ResourcePool. +type ResourcePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourcePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ResourcePool{}, &ResourcePoolList{}) +} diff --git a/api/v1beta2/resourcepoolclaim_func.go b/api/v1beta2/resourcepoolclaim_func.go new file mode 100644 index 00000000..7ff9a880 --- /dev/null +++ b/api/v1beta2/resourcepoolclaim_func.go @@ -0,0 +1,20 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/projectcapsule/capsule/pkg/meta" +) + +// IsBoundToResourcePool indicates whether the claim is bound to a ResourcePool. +func (r *ResourcePoolClaim) IsBoundToResourcePool() bool { + if r.Status.Condition.Type == meta.BoundCondition && + r.Status.Condition.Status == metav1.ConditionTrue { + return true + } + + return false +} diff --git a/api/v1beta2/resourcepoolclaim_func_test.go b/api/v1beta2/resourcepoolclaim_func_test.go new file mode 100644 index 00000000..35ddc069 --- /dev/null +++ b/api/v1beta2/resourcepoolclaim_func_test.go @@ -0,0 +1,71 @@ +// Copyright 2020-2023 Project Capsule Authors. 
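Editorial note: the sketch below (not part of this diff) shows the status shape that IsBoundToResourcePool above expects to read back. The markBound helper and the "Bound" reason string are assumptions made for illustration; only meta.BoundCondition and the API types come from this change.

    package v1beta2_test

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
    	"github.com/projectcapsule/capsule/pkg/api"
    	"github.com/projectcapsule/capsule/pkg/meta"
    )

    // markBound is a hypothetical helper showing the fields a claim's status needs
    // for IsBoundToResourcePool to return true.
    func markBound(claim *capsulev1beta2.ResourcePoolClaim, pool *capsulev1beta2.ResourcePool) {
    	claim.Status.Pool = api.StatusNameUID{Name: api.Name(pool.Name), UID: pool.UID}
    	claim.Status.Condition = metav1.Condition{
    		Type:               meta.BoundCondition,
    		Status:             metav1.ConditionTrue,
    		Reason:             "Bound", // reason value assumed for illustration
    		LastTransitionTime: metav1.Now(),
    	}
    }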
+// SPDX-License-Identifier: Apache-2.0 + +package v1beta2 + +import ( + "testing" + + "github.com/projectcapsule/capsule/pkg/meta" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestIsBoundToResourcePool(t *testing.T) { + tests := []struct { + name string + claim ResourcePoolClaim + expected bool + }{ + { + name: "bound to resource pool (Assigned=True)", + claim: ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{ + Condition: metav1.Condition{ + Type: meta.BoundCondition, + Status: metav1.ConditionTrue, + }, + }, + }, + expected: true, + }, + { + name: "not bound - wrong condition type", + claim: ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{ + Condition: metav1.Condition{ + Type: "SomethingElse", + Status: metav1.ConditionTrue, + }, + }, + }, + expected: false, + }, + { + name: "not bound - status not true", + claim: ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{ + Condition: metav1.Condition{ + Type: meta.BoundCondition, + Status: metav1.ConditionFalse, + }, + }, + }, + expected: false, + }, + { + name: "not bound - empty condition", + claim: ResourcePoolClaim{ + Status: ResourcePoolClaimStatus{}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.claim.IsBoundToResourcePool() + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/api/v1beta2/resourcepoolclaim_types.go b/api/v1beta2/resourcepoolclaim_types.go new file mode 100644 index 00000000..c7eff4d8 --- /dev/null +++ b/api/v1beta2/resourcepoolclaim_types.go @@ -0,0 +1,58 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/projectcapsule/capsule/pkg/api" +) + +type ResourcePoolClaimSpec struct { + // If there's the possability to claim from multiple global Quotas + // You must be specific about which one you want to claim resources from + // Once bound to a ResourcePool, this field is immutable + Pool string `json:"pool"` + // Amount which should be claimed for the resourcequota + ResourceClaims corev1.ResourceList `json:"claim"` +} + +// ResourceQuotaClaimStatus defines the observed state of ResourceQuotaClaim. +type ResourcePoolClaimStatus struct { + // Reference to the GlobalQuota being claimed from + Pool api.StatusNameUID `json:"pool,omitempty"` + // Condtion for this resource claim + Condition metav1.Condition `json:"condition,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Pool",type="string",JSONPath=".status.pool.name",description="The ResourcePool being claimed from" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.condition.type",description="Status for claim" +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.condition.reason",description="Reason for status" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.condition.message",description="Condition Message" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// ResourcePoolClaim is the Schema for the resourcepoolclaims API. 
+type ResourcePoolClaim struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ResourcePoolClaimSpec `json:"spec,omitempty"` + Status ResourcePoolClaimStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceQuotaClaimList contains a list of ResourceQuotaClaim. +type ResourcePoolClaimList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourcePoolClaim `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ResourcePoolClaim{}, &ResourcePoolClaimList{}) +} diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 52bcddba..2b3b9115 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -9,6 +9,7 @@ package v1beta2 import ( "github.com/projectcapsule/capsule/pkg/api" + corev1 "k8s.io/api/core/v1" "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -517,6 +518,387 @@ func (in *RawExtension) DeepCopy() *RawExtension { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. +func (in *ResourcePool) DeepCopy() *ResourcePool { + if in == nil { + return nil + } + out := new(ResourcePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolClaim) DeepCopyInto(out *ResourcePoolClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaim. +func (in *ResourcePoolClaim) DeepCopy() *ResourcePoolClaim { + if in == nil { + return nil + } + out := new(ResourcePoolClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePoolClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolClaimList) DeepCopyInto(out *ResourcePoolClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourcePoolClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimList. 
+func (in *ResourcePoolClaimList) DeepCopy() *ResourcePoolClaimList { + if in == nil { + return nil + } + out := new(ResourcePoolClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePoolClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolClaimSpec) DeepCopyInto(out *ResourcePoolClaimSpec) { + *out = *in + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimSpec. +func (in *ResourcePoolClaimSpec) DeepCopy() *ResourcePoolClaimSpec { + if in == nil { + return nil + } + out := new(ResourcePoolClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolClaimStatus) DeepCopyInto(out *ResourcePoolClaimStatus) { + *out = *in + out.Pool = in.Pool + in.Condition.DeepCopyInto(&out.Condition) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimStatus. +func (in *ResourcePoolClaimStatus) DeepCopy() *ResourcePoolClaimStatus { + if in == nil { + return nil + } + out := new(ResourcePoolClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolClaimsItem) DeepCopyInto(out *ResourcePoolClaimsItem) { + *out = *in + out.StatusNameUID = in.StatusNameUID + if in.Claims != nil { + in, out := &in.Claims, &out.Claims + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimsItem. +func (in *ResourcePoolClaimsItem) DeepCopy() *ResourcePoolClaimsItem { + if in == nil { + return nil + } + out := new(ResourcePoolClaimsItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourcePoolClaimsList) DeepCopyInto(out *ResourcePoolClaimsList) { + { + in := &in + *out = make(ResourcePoolClaimsList, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourcePoolClaimsItem) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimsList. +func (in ResourcePoolClaimsList) DeepCopy() ResourcePoolClaimsList { + if in == nil { + return nil + } + out := new(ResourcePoolClaimsList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePoolList) DeepCopyInto(out *ResourcePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourcePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolList. +func (in *ResourcePoolList) DeepCopy() *ResourcePoolList { + if in == nil { + return nil + } + out := new(ResourcePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourcePoolNamespaceClaimsStatus) DeepCopyInto(out *ResourcePoolNamespaceClaimsStatus) { + { + in := &in + *out = make(ResourcePoolNamespaceClaimsStatus, len(*in)) + for key, val := range *in { + var outVal []*ResourcePoolClaimsItem + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(ResourcePoolClaimsList, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourcePoolClaimsItem) + (*in).DeepCopyInto(*out) + } + } + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolNamespaceClaimsStatus. +func (in ResourcePoolNamespaceClaimsStatus) DeepCopy() ResourcePoolNamespaceClaimsStatus { + if in == nil { + return nil + } + out := new(ResourcePoolNamespaceClaimsStatus) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolQuotaStatus) DeepCopyInto(out *ResourcePoolQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Claimed != nil { + in, out := &in.Claimed, &out.Claimed + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Available != nil { + in, out := &in.Available, &out.Available + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolQuotaStatus. +func (in *ResourcePoolQuotaStatus) DeepCopy() *ResourcePoolQuotaStatus { + if in == nil { + return nil + } + out := new(ResourcePoolQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePoolSpec) DeepCopyInto(out *ResourcePoolSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]api.NamespaceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Quota.DeepCopyInto(&out.Quota) + if in.Defaults != nil { + in, out := &in.Defaults, &out.Defaults + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + in.Config.DeepCopyInto(&out.Config) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolSpec. +func (in *ResourcePoolSpec) DeepCopy() *ResourcePoolSpec { + if in == nil { + return nil + } + out := new(ResourcePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolSpecConfiguration) DeepCopyInto(out *ResourcePoolSpecConfiguration) { + *out = *in + if in.DefaultsAssignZero != nil { + in, out := &in.DefaultsAssignZero, &out.DefaultsAssignZero + *out = new(bool) + **out = **in + } + if in.OrderedQueue != nil { + in, out := &in.OrderedQueue, &out.OrderedQueue + *out = new(bool) + **out = **in + } + if in.DeleteBoundResources != nil { + in, out := &in.DeleteBoundResources, &out.DeleteBoundResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolSpecConfiguration. +func (in *ResourcePoolSpecConfiguration) DeepCopy() *ResourcePoolSpecConfiguration { + if in == nil { + return nil + } + out := new(ResourcePoolSpecConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePoolStatus) DeepCopyInto(out *ResourcePoolStatus) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Claims != nil { + in, out := &in.Claims, &out.Claims + *out = make(ResourcePoolNamespaceClaimsStatus, len(*in)) + for key, val := range *in { + var outVal []*ResourcePoolClaimsItem + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(ResourcePoolClaimsList, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourcePoolClaimsItem) + (*in).DeepCopyInto(*out) + } + } + } + (*out)[key] = outVal + } + } + in.Allocation.DeepCopyInto(&out.Allocation) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolStatus. +func (in *ResourcePoolStatus) DeepCopy() *ResourcePoolStatus { + if in == nil { + return nil + } + out := new(ResourcePoolStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { *out = *in diff --git a/charts/capsule/README.md b/charts/capsule/README.md index 45e20e79..b4c16799 100644 --- a/charts/capsule/README.md +++ b/charts/capsule/README.md @@ -134,6 +134,10 @@ Here the values you can override: | ports | list | `[]` | Set additional ports for the deployment | | priorityClassName | string | `""` | Set the priority class name of the Capsule pod | | proxy.enabled | bool | `false` | Enable Installation of Capsule Proxy | +| rbac.resourcepoolclaims.create | bool | `false` | | +| rbac.resourcepoolclaims.labels."rbac.authorization.k8s.io/aggregate-to-admin" | string | `"true"` | | +| rbac.resources.create | bool | `false` | | +| rbac.resources.labels."rbac.authorization.k8s.io/aggregate-to-admin" | string | `"true"` | | | replicaCount | int | `1` | Set the replica count for capsule pod | | securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true}` | Set the securityContext for the Capsule container | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account. | @@ -234,6 +238,16 @@ Here the values you can override: | webhooks.hooks.pods.failurePolicy | string | `"Fail"` | | | webhooks.hooks.pods.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | | | webhooks.hooks.pods.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | | +| webhooks.hooks.resourcepools.claims.failurePolicy | string | `"Fail"` | | +| webhooks.hooks.resourcepools.claims.matchPolicy | string | `"Equivalent"` | | +| webhooks.hooks.resourcepools.claims.namespaceSelector | object | `{}` | | +| webhooks.hooks.resourcepools.claims.objectSelector | object | `{}` | | +| webhooks.hooks.resourcepools.claims.reinvocationPolicy | string | `"Never"` | | +| webhooks.hooks.resourcepools.pools.failurePolicy | string | `"Fail"` | | +| webhooks.hooks.resourcepools.pools.matchPolicy | string | `"Equivalent"` | | +| webhooks.hooks.resourcepools.pools.namespaceSelector | object | `{}` | | +| webhooks.hooks.resourcepools.pools.objectSelector | object | `{}` | | +| webhooks.hooks.resourcepools.pools.reinvocationPolicy | string | `"Never"` | | | webhooks.hooks.services.failurePolicy | string | `"Fail"` | | | webhooks.hooks.services.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | | | webhooks.hooks.services.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | | diff --git a/charts/capsule/crds/capsule.clastix.io_resourcepoolclaims.yaml b/charts/capsule/crds/capsule.clastix.io_resourcepoolclaims.yaml new file mode 100644 index 00000000..9de2d79f --- /dev/null +++ b/charts/capsule/crds/capsule.clastix.io_resourcepoolclaims.yaml @@ -0,0 +1,158 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: resourcepoolclaims.capsule.clastix.io +spec: + group: capsule.clastix.io + names: + kind: ResourcePoolClaim + listKind: ResourcePoolClaimList + plural: resourcepoolclaims + singular: resourcepoolclaim + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The ResourcePool being claimed from + jsonPath: .status.pool.name + name: Pool + type: string + - description: Status for claim + jsonPath: .status.condition.type + name: Status + type: string + - description: Reason for status + jsonPath: .status.condition.reason + name: Reason + type: string + - 
description: Condition Message + jsonPath: .status.condition.message + name: Message + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResourcePoolClaim is the Schema for the resourcepoolclaims API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + claim: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Amount which should be claimed for the resourcequota + type: object + pool: + description: |- + If there's the possability to claim from multiple global Quotas + You must be specific about which one you want to claim resources from + Once bound to a ResourcePool, this field is immutable + type: string + required: + - claim + - pool + type: object + status: + description: ResourceQuotaClaimStatus defines the observed state of ResourceQuotaClaim. + properties: + condition: + description: Condtion for this resource claim + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + pool: + description: Reference to the GlobalQuota being claimed from + properties: + name: + description: Name + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + namespace: + description: Namespace + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + uid: + description: UID of the tracked Tenant to pin point tracking + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/capsule/crds/capsule.clastix.io_resourcepools.yaml b/charts/capsule/crds/capsule.clastix.io_resourcepools.yaml new file mode 100644 index 00000000..a1ceaf73 --- /dev/null +++ b/charts/capsule/crds/capsule.clastix.io_resourcepools.yaml @@ -0,0 +1,308 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: resourcepools.capsule.clastix.io +spec: + group: capsule.clastix.io + names: + kind: ResourcePool + listKind: ResourcePoolList + plural: resourcepools + shortNames: + - quotapool + singular: resourcepool + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The total amount of Claims bound + jsonPath: .status.claimCount + name: Claims + type: integer + - description: The total amount of Namespaces considered + jsonPath: .status.namespaceCount + name: Namespaces + type: integer + - description: Age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: |- + Resourcepools allows you to define a set of resources as known from ResoureQuotas. The Resourcepools are defined at cluster-scope an should + be administrated by cluster-administrators. However they create an interface, where cluster-administrators can define + from which namespaces resources from a Resourcepool can be claimed. The claiming is done via a namespaced CRD called ResourcePoolClaim. Then + it's up the group of users within these namespaces, to manage the resources they consume per namespace. Each Resourcepool provisions a ResourceQuotainto all the selected namespaces. Then essentially the ResourcePoolClaims, when they can be assigned to the ResourcePool stack resources on top of that + ResourceQuota based on the namspace, where the ResourcePoolClaim was made from. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourcePoolSpec. 
+ properties: + config: + default: {} + description: Additional Configuration + properties: + defaultsZero: + default: false + description: With this option all resources which can be allocated + are set to 0 for the resourcequota defaults. + type: boolean + deleteBoundResources: + default: false + description: |- + When a resourcepool is deleted, the resourceclaims bound to it are disassociated from the resourcepool but not deleted. + By Enabling this option, the resourceclaims will be deleted when the resourcepool is deleted, if they are in bound state. + type: boolean + orderedQueue: + default: false + description: |- + Claims are queued whenever they are allocated to a pool. A pool tries to allocate claims in order based on their + creation date. But no matter their creation time, if a claim is requesting too much resources it's put into the queue + but if a lower priority claim still has enough space in the available resources, it will be able to claim them. Eventough + it's priority was lower + Enabling this option respects to Order. Meaning the Creationtimestamp matters and if a resource is put into the queue, no + other claim can claim the same resources with lower priority. + type: boolean + type: object + defaults: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + The Defaults given for each namespace, the default is not counted towards the total allocation + When you use claims it's recommended to provision Defaults as the prevent the scheduling of any resources + type: object + quota: + description: Define the resourcequota served by this resourcepool. + properties: + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + hard is the set of desired hard limits for each named resource. + More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + type: object + scopeSelector: + description: |- + scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota + but expressed using ScopeSelectorOperator in combination with possible values. + For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + properties: + matchExpressions: + description: A list of scope selector requirements by scope + of the resources. + items: + description: |- + A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator + that relates the scope name and values. + properties: + operator: + description: |- + Represents a scope's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. + type: string + scopeName: + description: The name of the scope that the selector + applies to. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - operator + - scopeName + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + scopes: + description: |- + A collection of filters that must match each object tracked by a quota. + If not specified, the quota matches all objects. + items: + description: A ResourceQuotaScope defines a filter that must + match each object tracked by a quota + type: string + type: array + x-kubernetes-list-type: atomic + type: object + selectors: + description: Selector to match the namespaces that should be managed + by the GlobalResourceQuota + items: + description: Selector for resources and their labels or selecting + origin namespaces + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + required: + - quota + type: object + status: + description: GlobalResourceQuotaStatus defines the observed state of GlobalResourceQuota. + properties: + allocation: + description: Tracks the Usage from Claimed against what has been granted + from the pool + properties: + available: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Used to track the usage of the resource in the pool + (diff hard - claimed). May be used for further automation + type: object + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Hard is the set of enforced hard limits for each named resource. 
+ More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + type: object + used: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Used is the current observed total usage of the resource + in the namespace. + type: object + type: object + claimCount: + default: 0 + description: Amount of claims + type: integer + claims: + additionalProperties: + items: + description: ResourceQuotaClaimStatus defines the observed state + of ResourceQuotaClaim. + properties: + claims: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Claimed resources + type: object + name: + description: Name + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + namespace: + description: Namespace + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + uid: + description: UID of the tracked Tenant to pin point tracking + type: string + type: object + type: array + description: Tracks the quotas for the Resource. + type: object + namespaceCount: + default: 0 + description: How many namespaces are considered + type: integer + namespaces: + description: Namespaces which are considered for claims + items: + type: string + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/capsule/templates/mutatingwebhookconfiguration.yaml b/charts/capsule/templates/mutatingwebhookconfiguration.yaml index 600a5832..a9826ecf 100644 --- a/charts/capsule/templates/mutatingwebhookconfiguration.yaml +++ b/charts/capsule/templates/mutatingwebhookconfiguration.yaml @@ -135,5 +135,57 @@ webhooks: scope: '*' sideEffects: NoneOnDryRun timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }} -{{- end }} + {{- end }} + {{- with .Values.webhooks.hooks.resourcepools.pools }} +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + {{- include "capsule.webhooks.service" (dict "path" "/resourcepool/mutating" "ctx" $) | nindent 4 }} + failurePolicy: {{ .failurePolicy }} + matchPolicy: {{ .matchPolicy }} + name: resourcepools.projectcapsule.dev + namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }} + objectSelector: {{ toYaml .objectSelector | nindent 4 }} + reinvocationPolicy: {{ .reinvocationPolicy }} + rules: + - apiGroups: + - "capsule.clastix.io" + apiVersions: + - "*" + operations: + - CREATE + - UPDATE + resources: + - resourcepools + scope: '*' + sideEffects: None + timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }} + {{- end }} + {{- with .Values.webhooks.hooks.resourcepools.claims }} +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + {{- include "capsule.webhooks.service" (dict "path" "/resourcepool/claim/mutating" "ctx" $) | nindent 4 }} + failurePolicy: {{ .failurePolicy }} + matchPolicy: {{ .matchPolicy }} + name: resourcepoolclaims.projectcapsule.dev + namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }} + objectSelector: {{ toYaml .objectSelector | nindent 4 }} + reinvocationPolicy: {{ .reinvocationPolicy }} + rules: + - apiGroups: + - "capsule.clastix.io" + apiVersions: + - "*" + operations: + - CREATE + - UPDATE + resources: + - resourcepoolclaims + scope: '*' + 
sideEffects: None + timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }} + {{- end }} {{- end }} diff --git a/charts/capsule/templates/rbac-tenants.yaml b/charts/capsule/templates/rbac-tenants.yaml new file mode 100644 index 00000000..d0cb93f7 --- /dev/null +++ b/charts/capsule/templates/rbac-tenants.yaml @@ -0,0 +1,24 @@ +{{- if $.Values.rbac.resourcepoolclaims.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "capsule.fullname" $ }}-resourcepoolclaims + labels: + {{- toYaml $.Values.rbac.resourcepoolclaims.labels | nindent 4 }} +rules: +- apiGroups: ["capsule.clastix.io"] + resources: ["resourcepoolclaims"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +{{- end }} +{{- if $.Values.rbac.resources.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "capsule.fullname" $ }}-resources + labels: + {{- toYaml $.Values.rbac.resources.labels | nindent 4 }} +rules: +- apiGroups: ["capsule.clastix.io"] + resources: ["tenantresources"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +{{- end }} diff --git a/charts/capsule/templates/validatingwebhookconfiguration.yaml b/charts/capsule/templates/validatingwebhookconfiguration.yaml index 5f737c81..34db4e49 100644 --- a/charts/capsule/templates/validatingwebhookconfiguration.yaml +++ b/charts/capsule/templates/validatingwebhookconfiguration.yaml @@ -274,7 +274,7 @@ webhooks: sideEffects: None timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }} {{- end }} -{{- with .Values.webhooks.hooks.tenants }} + {{- with .Values.webhooks.hooks.tenants }} - admissionReviewVersions: - v1 - v1beta1 @@ -299,7 +299,57 @@ webhooks: scope: '*' sideEffects: None timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }} -{{- end }} + {{- end }} + {{- with .Values.webhooks.hooks.resourcepools.pools }} +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + {{- include "capsule.webhooks.service" (dict "path" "/resourcepool/validating" "ctx" $) | nindent 4 }} + failurePolicy: {{ .failurePolicy }} + matchPolicy: {{ .matchPolicy }} + name: resourcepools.projectcapsule.dev + namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }} + objectSelector: {{ toYaml .objectSelector | nindent 4 }} + rules: + - apiGroups: + - "capsule.clastix.io" + apiVersions: + - "*" + operations: + - CREATE + - UPDATE + resources: + - resourcepools + scope: '*' + sideEffects: None + timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }} + {{- end }} + {{- with .Values.webhooks.hooks.resourcepools.pools }} +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + {{- include "capsule.webhooks.service" (dict "path" "/resourcepool/claim/validating" "ctx" $) | nindent 4 }} + failurePolicy: {{ .failurePolicy }} + matchPolicy: {{ .matchPolicy }} + name: resourcepoolclaims.projectcapsule.dev + namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }} + objectSelector: {{ toYaml .objectSelector | nindent 4 }} + rules: + - apiGroups: + - "capsule.clastix.io" + apiVersions: + - "*" + operations: + - CREATE + - UPDATE + resources: + - resourcepoolclaims + scope: '*' + sideEffects: None + timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }} + {{- end }} {{- with .Values.webhooks.hooks.customresources }} - admissionReviewVersions: - v1 @@ -332,4 +382,4 @@ webhooks: sideEffects: None timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds 
}} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/capsule/values.schema.json b/charts/capsule/values.schema.json index 221a5d52..06ddcbd8 100644 --- a/charts/capsule/values.schema.json +++ b/charts/capsule/values.schema.json @@ -375,6 +375,43 @@ }, "type": "object" }, + "rbac": { + "properties": { + "resourcepoolclaims": { + "properties": { + "create": { + "type": "boolean" + }, + "labels": { + "properties": { + "rbac.authorization.k8s.io/aggregate-to-admin": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "resources": { + "properties": { + "create": { + "type": "boolean" + }, + "labels": { + "properties": { + "rbac.authorization.k8s.io/aggregate-to-admin": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, "replicaCount": { "type": "integer" }, @@ -817,6 +854,55 @@ }, "type": "object" }, + "resourcepools": { + "properties": { + "claims": { + "properties": { + "failurePolicy": { + "type": "string" + }, + "matchPolicy": { + "type": "string" + }, + "namespaceSelector": { + "properties": {}, + "type": "object" + }, + "objectSelector": { + "properties": {}, + "type": "object" + }, + "reinvocationPolicy": { + "type": "string" + } + }, + "type": "object" + }, + "pools": { + "properties": { + "failurePolicy": { + "type": "string" + }, + "matchPolicy": { + "type": "string" + }, + "namespaceSelector": { + "properties": {}, + "type": "object" + }, + "objectSelector": { + "properties": {}, + "type": "object" + }, + "reinvocationPolicy": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "services": { "properties": { "failurePolicy": { diff --git a/charts/capsule/values.yaml b/charts/capsule/values.yaml index 6ea0b1bc..c78539a5 100644 --- a/charts/capsule/values.yaml +++ b/charts/capsule/values.yaml @@ -76,6 +76,17 @@ proxy: # -- Enable Installation of Capsule Proxy enabled: false +# These are ClusterRoles which grant permissions for Capsule CRDs to Tenant Owners +rbac: + resources: + create: false + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + resourcepoolclaims: + create: false + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + # Manager Options manager: @@ -265,6 +276,19 @@ webhooks: # Hook Configuration hooks: + resourcepools: + pools: + namespaceSelector: {} + objectSelector: {} + reinvocationPolicy: Never + matchPolicy: Equivalent + failurePolicy: Fail + claims: + namespaceSelector: {} + objectSelector: {} + reinvocationPolicy: Never + matchPolicy: Equivalent + failurePolicy: Fail namespaceOwnerReference: failurePolicy: Fail customresources: @@ -353,6 +377,7 @@ webhooks: - key: capsule.clastix.io/tenant operator: Exists + # ServiceMonitor serviceMonitor: # -- Enable ServiceMonitor diff --git a/main.go b/cmd/main.go similarity index 92% rename from main.go rename to cmd/main.go index fc7e9331..00a79278 100644 --- a/main.go +++ b/cmd/main.go @@ -35,12 +35,14 @@ import ( podlabelscontroller "github.com/projectcapsule/capsule/controllers/pod" "github.com/projectcapsule/capsule/controllers/pv" rbaccontroller "github.com/projectcapsule/capsule/controllers/rbac" + "github.com/projectcapsule/capsule/controllers/resourcepools" "github.com/projectcapsule/capsule/controllers/resources" servicelabelscontroller "github.com/projectcapsule/capsule/controllers/servicelabels" tenantcontroller "github.com/projectcapsule/capsule/controllers/tenant" tlscontroller "github.com/projectcapsule/capsule/controllers/tls" 
"github.com/projectcapsule/capsule/pkg/configuration" "github.com/projectcapsule/capsule/pkg/indexer" + "github.com/projectcapsule/capsule/pkg/metrics" "github.com/projectcapsule/capsule/pkg/webhook" "github.com/projectcapsule/capsule/pkg/webhook/defaults" "github.com/projectcapsule/capsule/pkg/webhook/gateway" @@ -51,6 +53,7 @@ import ( "github.com/projectcapsule/capsule/pkg/webhook/node" "github.com/projectcapsule/capsule/pkg/webhook/pod" "github.com/projectcapsule/capsule/pkg/webhook/pvc" + "github.com/projectcapsule/capsule/pkg/webhook/resourcepool" "github.com/projectcapsule/capsule/pkg/webhook/route" "github.com/projectcapsule/capsule/pkg/webhook/service" "github.com/projectcapsule/capsule/pkg/webhook/tenant" @@ -195,6 +198,7 @@ func main() { if err = (&tenantcontroller.Manager{ RESTConfig: manager.GetConfig(), Client: manager.GetClient(), + Metrics: metrics.MustMakeTenantRecorder(), Log: ctrl.Log.WithName("controllers").WithName("Tenant"), Recorder: manager.GetEventRecorderFor("tenant-controller"), }).SetupWithManager(manager); err != nil { @@ -236,6 +240,10 @@ func main() { route.CustomResources(tenant.ResourceCounterHandler(manager.GetClient())), route.Gateway(gateway.Class(cfg)), route.Defaults(defaults.Handler(cfg, kubeVersion)), + route.ResourcePoolMutation((resourcepool.PoolMutationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepool")))), + route.ResourcePoolValidation((resourcepool.PoolValidationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepool")))), + route.ResourcePoolClaimMutation((resourcepool.ClaimMutationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepoolclaims")))), + route.ResourcePoolClaimValidation((resourcepool.ClaimValidationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepoolclaims")))), ) nodeWebhookSupported, _ := utils.NodeWebhookSupported(kubeVersion) @@ -304,6 +312,15 @@ func main() { os.Exit(1) } + if err := resourcepools.Add( + ctrl.Log.WithName("controllers").WithName("ResourcePools"), + manager, + manager.GetEventRecorderFor("pools-ctrl"), + ); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "resourcepools") + os.Exit(1) + } + setupLog.Info("starting manager") if err = manager.Start(ctx); err != nil { diff --git a/version.go b/cmd/version.go similarity index 100% rename from version.go rename to cmd/version.go diff --git a/controllers/resourcepools/claim_controller.go b/controllers/resourcepools/claim_controller.go new file mode 100644 index 00000000..b817af3c --- /dev/null +++ b/controllers/resourcepools/claim_controller.go @@ -0,0 +1,294 @@ +// Copyright 2020-2023 Project Capsule Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package resourcepools + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + "github.com/projectcapsule/capsule/pkg/api" + "github.com/projectcapsule/capsule/pkg/meta" + "github.com/projectcapsule/capsule/pkg/metrics" +) + +type resourceClaimController struct { + client.Client + metrics *metrics.ClaimRecorder + log logr.Logger + recorder record.EventRecorder +} + +func (r *resourceClaimController) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&capsulev1beta2.ResourcePoolClaim{}). + Watches( + &capsulev1beta2.ResourcePool{}, + handler.EnqueueRequestsFromMapFunc(r.claimsWithoutPoolFromNamespaces), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Complete(r) +} + +func (r resourceClaimController) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) { + log := r.log.WithValues("Request.Name", request.Name) + + instance := &capsulev1beta2.ResourcePoolClaim{} + if err = r.Get(ctx, request.NamespacedName, instance); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Request object not found, could have been deleted after reconcile request") + + r.metrics.DeleteClaimMetric(request.Name) + + return reconcile.Result{}, nil + } + + log.Error(err, "Error reading the object") + + return + } + + // Ensuring the Quota Status + err = r.reconcile(ctx, log, instance) + + // Emit a Metric in any case + r.metrics.RecordClaimCondition(instance) + + if err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, err +} + +// Trigger claims from a namespace, which are not yet allocated. +// when a resourcepool updates it's status. +func (r *resourceClaimController) claimsWithoutPoolFromNamespaces(ctx context.Context, obj client.Object) []reconcile.Request { + pool, ok := obj.(*capsulev1beta2.ResourcePool) + if !ok { + return nil + } + + var requests []reconcile.Request + + for _, ns := range pool.Status.Namespaces { + claimList := &capsulev1beta2.ResourcePoolClaimList{} + if err := r.List(ctx, claimList, client.InNamespace(ns)); err != nil { + r.log.Error(err, "Failed to list claims in namespace", "namespace", ns) + + continue + } + + for _, claim := range claimList.Items { + if claim.Status.Pool.UID == "" { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: claim.Namespace, + Name: claim.Name, + }, + }) + } + } + } + + return requests +} + +// This Controller is responsible for assigning Claims to ResourcePools. +// Everything else will be handeled by the ResourcePool Controller. 
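+// A minimal sketch of the kind of claim this reconciler binds (illustrative
+// names and quantities; it assumes a configured client.Client named c, a
+// context ctx, and a namespace that is already selected by the referenced pool):
+//
+//	claim := &capsulev1beta2.ResourcePoolClaim{}
+//	claim.Name = "compute-claim"
+//	claim.Namespace = "solar-prod"
+//	claim.Spec.Pool = "solar-pool"
+//	claim.Spec.ResourceClaims = corev1.ResourceList{
+//		corev1.ResourceLimitsCPU: resource.MustParse("2"),
+//	}
+//	_ = c.Create(ctx, claim)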
+func (r resourceClaimController) reconcile( + ctx context.Context, + log logr.Logger, + claim *capsulev1beta2.ResourcePoolClaim, +) (err error) { + pool, err := r.evaluateResourcePool(ctx, claim) + if err != nil { + claim.Status.Pool = api.StatusNameUID{} + + cond := meta.NewAssignedCondition(claim) + cond.Status = metav1.ConditionFalse + cond.Reason = meta.FailedReason + cond.Message = err.Error() + + return updateStatusAndEmitEvent( + ctx, + r.Client, + r.recorder, + claim, + cond, + ) + } + + return r.allocateResourcePool(ctx, log, claim, pool) +} + +// Verify a Pool can be allocated. +func (r resourceClaimController) evaluateResourcePool( + ctx context.Context, + claim *capsulev1beta2.ResourcePoolClaim, +) (pool *capsulev1beta2.ResourcePool, err error) { + poolName := claim.Spec.Pool + + if poolName == "" { + err = fmt.Errorf("no pool reference was defined") + + return pool, err + } + + pool = &capsulev1beta2.ResourcePool{} + if err := r.Get(ctx, client.ObjectKey{ + Name: poolName, + }, pool); err != nil { + return nil, err + } + + if !pool.DeletionTimestamp.IsZero() { + return nil, fmt.Errorf( + "resourcepool not available", + ) + } + + allowed := false + + for _, ns := range pool.Status.Namespaces { + if ns == claim.GetNamespace() { + allowed = true + + continue + } + } + + if !allowed { + return nil, fmt.Errorf( + "resourcepool not available", + ) + } + + // Validates if Resources can be allocated in the first place + for resourceName := range claim.Spec.ResourceClaims { + _, exists := pool.Status.Allocation.Hard[resourceName] + if !exists { + return nil, fmt.Errorf( + "resource %s is not available in pool %s", + resourceName, + pool.Name, + ) + } + } + + return pool, err +} + +func (r resourceClaimController) allocateResourcePool( + ctx context.Context, + log logr.Logger, + cl *capsulev1beta2.ResourcePoolClaim, + pool *capsulev1beta2.ResourcePool, +) (err error) { + allocate := api.StatusNameUID{ + Name: api.Name(pool.GetName()), + UID: pool.GetUID(), + } + + if !meta.HasLooseOwnerReference(cl, pool) { + log.V(5).Info("adding ownerreference for", "pool", pool.Name) + + patch := client.MergeFrom(cl.DeepCopy()) + + if err := meta.SetLooseOwnerReference(cl, pool, r.Scheme()); err != nil { + return err + } + + if err := r.Patch(ctx, cl, patch); err != nil { + return err + } + } + + if cl.Status.Pool.Name == allocate.Name && + cl.Status.Pool.UID == allocate.UID { + return nil + } + + cond := meta.NewAssignedCondition(cl) + cond.Status = metav1.ConditionTrue + cond.Reason = meta.SucceededReason + + // Set claim pool in status and condition + cl.Status = capsulev1beta2.ResourcePoolClaimStatus{ + Pool: allocate, + Condition: cond, + } + + // Update status in a separate call + if err := r.Client.Status().Update(ctx, cl); err != nil { + return err + } + + return nil +} + +// Update the Status of a claim and emit an event if Status changed. 
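+// Typical call pattern, as used by the claim and pool controllers in this
+// change (shown here only as a usage sketch):
+//
+//	cond := meta.NewAssignedCondition(claim)
+//	cond.Status = metav1.ConditionFalse
+//	cond.Reason = meta.FailedReason
+//	cond.Message = err.Error()
+//
+//	return updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond)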
+func updateStatusAndEmitEvent( + ctx context.Context, + c client.Client, + recorder record.EventRecorder, + claim *capsulev1beta2.ResourcePoolClaim, + condition metav1.Condition, +) (err error) { + if claim.Status.Condition.Type == condition.Type && + claim.Status.Condition.Status == condition.Status && + claim.Status.Condition.Reason == condition.Reason && + claim.Status.Condition.Message == condition.Message { + return nil + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + current := &capsulev1beta2.ResourcePoolClaim{} + if err := c.Get(ctx, client.ObjectKeyFromObject(claim), current); err != nil { + return fmt.Errorf("failed to refetch instance before update: %w", err) + } + + current.Status.Condition = condition + + return c.Status().Update(ctx, current) + }) + + claim.Status.Condition = condition + + if err != nil { + return err + } + + eventType := corev1.EventTypeNormal + if claim.Status.Condition.Status == metav1.ConditionFalse { + eventType = corev1.EventTypeWarning + } + + recorder.AnnotatedEventf( + claim, + map[string]string{ + "Status": string(claim.Status.Condition.Status), + "Type": claim.Status.Condition.Type, + }, + eventType, + claim.Status.Condition.Reason, + claim.Status.Condition.Message, + ) + + return +} diff --git a/controllers/resourcepools/manager.go b/controllers/resourcepools/manager.go new file mode 100644 index 00000000..b63e6265 --- /dev/null +++ b/controllers/resourcepools/manager.go @@ -0,0 +1,40 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resourcepools + +import ( + "fmt" + + "github.com/go-logr/logr" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/projectcapsule/capsule/pkg/metrics" +) + +func Add( + log logr.Logger, + mgr manager.Manager, + recorder record.EventRecorder, +) (err error) { + if err = (&resourcePoolController{ + Client: mgr.GetClient(), + log: log.WithName("Pools"), + recorder: recorder, + metrics: metrics.MustMakeResourcePoolRecorder(), + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("unable to create pool controller: %w", err) + } + + if err = (&resourceClaimController{ + Client: mgr.GetClient(), + log: log.WithName("Claims"), + recorder: recorder, + metrics: metrics.MustMakeClaimRecorder(), + }).SetupWithManager(mgr); err != nil { + return fmt.Errorf("unable to create claim controller: %w", err) + } + + return nil +} diff --git a/controllers/resourcepools/pool_controller.go b/controllers/resourcepools/pool_controller.go new file mode 100644 index 00000000..43e7f79a --- /dev/null +++ b/controllers/resourcepools/pool_controller.go @@ -0,0 +1,771 @@ +// Copyright 2020-2023 Project Capsule Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package resourcepools
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/go-logr/logr"
+	"golang.org/x/sync/errgroup"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/util/retry"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
+	"github.com/projectcapsule/capsule/pkg/api"
+	"github.com/projectcapsule/capsule/pkg/meta"
+	"github.com/projectcapsule/capsule/pkg/metrics"
+	"github.com/projectcapsule/capsule/pkg/utils"
+)
+
+type resourcePoolController struct {
+	client.Client
+	metrics  *metrics.ResourcePoolRecorder
+	log      logr.Logger
+	recorder record.EventRecorder
+}
+
+func (r *resourcePoolController) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&capsulev1beta2.ResourcePool{}).
+		Owns(&corev1.ResourceQuota{}).
+		Watches(&capsulev1beta2.ResourcePoolClaim{},
+			handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &capsulev1beta2.ResourcePool{}),
+		).
+		Watches(&corev1.Namespace{},
+			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request {
+				// Fetch all ResourcePool objects
+				poolList := &capsulev1beta2.ResourcePoolList{}
+				if err := mgr.GetClient().List(ctx, poolList); err != nil {
+					r.log.Error(err, "Failed to list ResourcePool objects")
+
+					return nil
+				}
+
+				// Enqueue a reconcile request for each ResourcePool
+				var requests []reconcile.Request
+				for _, pool := range poolList.Items {
+					requests = append(requests, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&pool),
+					})
+				}
+
+				return requests
+			}),
+		).
+ Complete(r) +} + +func (r resourcePoolController) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) { + log := r.log.WithValues("Request.Name", request.Name) + // Fetch the Tenant instance + instance := &capsulev1beta2.ResourcePool{} + if err = r.Get(ctx, request.NamespacedName, instance); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Request object not found, could have been deleted after reconcile request") + + r.metrics.DeleteResourcePoolMetric(request.Name) + + return reconcile.Result{}, nil + } + + log.Error(err, "Error reading the object") + + return + } + + // ResourceQuota Reconciliation + reconcileErr := r.reconcile(ctx, log, instance) + + r.metrics.ResourceUsageMetrics(instance) + + // Always Post Status + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + current := &capsulev1beta2.ResourcePool{} + if err := r.Get(ctx, client.ObjectKeyFromObject(instance), current); err != nil { + return fmt.Errorf("failed to refetch instance before update: %w", err) + } + + current.Status = instance.Status + + return r.Client.Status().Update(ctx, current) + }) + + if reconcileErr != nil || err != nil { + log.V(3).Info("Failed to reconcile ResourcePool", "error", err) + + return ctrl.Result{}, reconcileErr + } + + err = r.finalize(ctx, instance) + + return ctrl.Result{}, err +} + +func (r *resourcePoolController) finalize( + ctx context.Context, + pool *capsulev1beta2.ResourcePool, +) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + // Re-fetch latest version of the object + latest := &capsulev1beta2.ResourcePool{} + if err := r.Get(ctx, client.ObjectKeyFromObject(pool), latest); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + + return err + } + + changed := false + + // Case: all claims are gone, remove finalizer + if latest.Status.ClaimSize == 0 && controllerutil.ContainsFinalizer(latest, meta.ControllerFinalizer) { + controllerutil.RemoveFinalizer(latest, meta.ControllerFinalizer) + + changed = true + } + + // Case: claims still exist, add finalizer if not already present + if latest.Status.ClaimSize > 0 && !controllerutil.ContainsFinalizer(latest, meta.ControllerFinalizer) { + controllerutil.AddFinalizer(latest, meta.ControllerFinalizer) + + changed = true + } + + if changed { + return r.Update(ctx, latest) + } + + return nil + }) +} + +func (r *resourcePoolController) reconcile( + ctx context.Context, + log logr.Logger, + pool *capsulev1beta2.ResourcePool, +) (err error) { + r.handlePoolHardResources(pool) + + namespaces, err := r.gatherMatchingNamespaces(ctx, log, pool) + if err != nil { + log.Error(err, "Can not get matching namespaces") + + return err + } + + currentNamespaces := make(map[string]struct{}, len(namespaces)) + for _, ns := range namespaces { + currentNamespaces[ns.Name] = struct{}{} + } + + claims, err := r.gatherMatchingClaims(ctx, log, pool, currentNamespaces) + if err != nil { + log.Error(err, "Can not get matching namespaces") + + return err + } + + log.V(5).Info("Collected assigned claims", "count", len(claims)) + + if err := r.garbageCollection(ctx, log, pool, claims, currentNamespaces); err != nil { + log.Error(err, "Failed to garbage collect ResourceQuotas") + + return err + } + + pool.AssignNamespaces(namespaces) + + // Sort by creation timestamp (oldest first) + sort.Slice(claims, func(i, j int) bool { + return claims[i].CreationTimestamp.Before(&claims[j].CreationTimestamp) + }) + + // Keeps track of resources which are exhausted by previous resource + 
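+	// claims. The map is keyed by resource name and accumulates the quantities that
+	// older claims requested but could not be granted; e.g. (illustrative) an entry
+	// of limits.cpu=4 means four CPUs are already queued ahead of any younger claim.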
// This is only required when Ordered is active + queuedResourcesMap := make(map[string]resource.Quantity) + + // You can now iterate over `allClaims` in order + for _, claim := range claims { + log.Info("Found claim", "name", claim.Name, "namespace", claim.Namespace, "created", claim.CreationTimestamp) + + err = r.reconcileResourceClaim(ctx, log.WithValues("Claim", claim.Name), pool, &claim, queuedResourcesMap) + if err != nil { + log.Error(err, "Failed to reconcile ResourceQuotaClaim", "claim", claim.Name) + } + } + + pool.CalculateClaimedResources() + pool.AssignClaims() + + return r.syncResourceQuotas(ctx, r.Client, pool, namespaces) +} + +// Reconciles a single ResourceClaim. +func (r *resourcePoolController) reconcileResourceClaim( + ctx context.Context, + log logr.Logger, + pool *capsulev1beta2.ResourcePool, + claim *capsulev1beta2.ResourcePoolClaim, + exhaustion map[string]resource.Quantity, +) (err error) { + t := pool.GetClaimFromStatus(claim) + if t != nil { + // TBD: Future Implementation for Claim Resizing here + return r.handleClaimToPoolBinding(ctx, pool, claim) + } + + // Verify if a resource was already exhausted by a previous claim + if *pool.Spec.Config.OrderedQueue { + var queued bool + + queued, err = r.handleClaimOrderedExhaustion( + ctx, + claim, + exhaustion, + ) + if err != nil { + return err + } + + if queued { + log.V(5).Info("Claim is queued", "claim", claim.Name) + + return nil + } + } + + // Check if Resources can be Assigned (Enough Resources to claim) + exhaustions := r.canClaimWithinNamespace(log, pool, claim) + if len(exhaustions) != 0 { + log.V(5).Info("exhausting resources", "amount", len(exhaustions)) + + return r.handleClaimResourceExhaustion( + ctx, + pool, + claim, + exhaustions, + exhaustion, + ) + } + + return r.handleClaimToPoolBinding(ctx, pool, claim) +} + +func (r *resourcePoolController) canClaimWithinNamespace( + log logr.Logger, + pool *capsulev1beta2.ResourcePool, + claim *capsulev1beta2.ResourcePoolClaim, +) (res map[string]PoolExhaustionResource) { + claimable := pool.GetAvailableClaimableResources() + log.V(5).Info("claimable resources", "claimable", claimable) + + _, namespaceClaimed := pool.GetNamespaceClaims(claim.Namespace) + log.V(5).Info("namespace claimed resources", "claimed", namespaceClaimed) + + res = make(map[string]PoolExhaustionResource) + + for resourceName, req := range claim.Spec.ResourceClaims { + // Verify if total Quota is available + available, exists := claimable[resourceName] + if !exists || available.IsZero() || available.Cmp(req) < 0 { + log.V(5).Info("not enough resources available", "available", available, "requesting", req) + + res[resourceName.String()] = PoolExhaustionResource{ + Available: available, + Requesting: req, + Namespace: false, + } + + continue + } + } + + return +} + +// Handles exhaustions when a exhaustion was already declared in the given map. 
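+// For every requested resource that already has an entry in the map, the claim
+// stays queued and its Bound condition is set to QueueExhaustedReason, with one
+// message fragment per resource, e.g. (illustrative quantities):
+//
+//	requested: limits.cpu=2, queued: limits.cpu=4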
+func (r *resourcePoolController) handleClaimOrderedExhaustion( + ctx context.Context, + claim *capsulev1beta2.ResourcePoolClaim, + exhaustion map[string]resource.Quantity, +) (queued bool, err error) { + status := make([]string, 0) + + for resourceName, qt := range claim.Spec.ResourceClaims { + req, ok := exhaustion[resourceName.String()] + if !ok { + continue + } + + line := fmt.Sprintf( + "requested: %s=%s, queued: %s=%s", + resourceName, + qt.String(), + resourceName, + req.String(), + ) + status = append(status, line) + } + + if len(status) != 0 { + queued = true + + cond := meta.NewBoundCondition(claim) + cond.Status = metav1.ConditionFalse + cond.Reason = meta.QueueExhaustedReason + cond.Message = strings.Join(status, "; ") + + return queued, updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond) + } + + return +} + +func (r *resourcePoolController) handleClaimResourceExhaustion( + ctx context.Context, + pool *capsulev1beta2.ResourcePool, + claim *capsulev1beta2.ResourcePoolClaim, + exhaustions map[string]PoolExhaustionResource, + exhaustion map[string]resource.Quantity, +) (err error) { + status := make([]string, 0) + + resourceNames := make([]string, 0) + for resourceName := range exhaustions { + resourceNames = append(resourceNames, resourceName) + } + + sort.Strings(resourceNames) + + for _, resourceName := range resourceNames { + ex := exhaustions[resourceName] + + if *pool.Spec.Config.OrderedQueue { + ext, ok := exhaustion[resourceName] + if ok { + ext.Add(ex.Requesting) + } else { + ext = ex.Requesting + } + + exhaustion[resourceName] = ext + } + + line := fmt.Sprintf( + "requested: %s=%s, available: %s=%s", + resourceName, + ex.Requesting.String(), + resourceName, + ex.Available.String(), + ) + + status = append(status, line) + } + + if len(status) != 0 { + cond := meta.NewBoundCondition(claim) + cond.Status = metav1.ConditionFalse + cond.Reason = meta.PoolExhaustedReason + cond.Message = strings.Join(status, "; ") + + return updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond) + } + + return err +} + +func (r *resourcePoolController) handleClaimToPoolBinding( + ctx context.Context, + pool *capsulev1beta2.ResourcePool, + claim *capsulev1beta2.ResourcePoolClaim, +) (err error) { + cond := meta.NewBoundCondition(claim) + cond.Status = metav1.ConditionTrue + cond.Reason = meta.SucceededReason + cond.Message = "Claimed resources" + + if err = updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond); err != nil { + return + } + + pool.AddClaimToStatus(claim) + + return +} + +// Attempts to garbage collect a ResourceQuota resource. 
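+// More precisely, this handles the disassociation of a claim from the pool:
+// when deleteBoundResources is disabled, or a release was explicitly triggered
+// on the claim, the loose owner reference and the release annotation are
+// removed; in every case the claim's status.pool is cleared and a
+// "Disassociated" event is recorded.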
+func (r *resourcePoolController) handleClaimDisassociation( + ctx context.Context, + log logr.Logger, + pool *capsulev1beta2.ResourcePool, + claim *capsulev1beta2.ResourcePoolClaimsItem, +) error { + current := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claim.Name.String(), + Namespace: claim.Namespace.String(), + UID: claim.UID, + }, + } + + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := r.Get(ctx, types.NamespacedName{ + Name: claim.Name.String(), + Namespace: claim.Namespace.String(), + }, current); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + + return fmt.Errorf("failed to refetch claim before patch: %w", err) + } + + if !*pool.Spec.Config.DeleteBoundResources || meta.ReleaseAnnotationTriggers(current) { + patch := client.MergeFrom(current.DeepCopy()) + meta.RemoveLooseOwnerReference(current, pool) + meta.ReleaseAnnotationRemove(current) + + if err := r.Patch(ctx, current, patch); err != nil { + return fmt.Errorf("failed to patch claim: %w", err) + } + } + + current.Status.Pool = api.StatusNameUID{} + if err := r.Client.Status().Update(ctx, current); err != nil { + return fmt.Errorf("failed to update claim status: %w", err) + } + + r.recorder.AnnotatedEventf( + current, + map[string]string{ + "Status": string(metav1.ConditionFalse), + "Type": meta.NotReadyCondition, + }, + corev1.EventTypeNormal, + "Disassociated", + "Claim is disassociated from the pool", + ) + + return nil + }) + if err != nil { + log.Info("Removing owner reference failed", "claim", current.Name, "pool", pool.Name, "error", err) + + return err + } + + pool.RemoveClaimFromStatus(current) + + return nil +} + +// Synchronize resources quotas in all the given namespaces (routines). +func (r *resourcePoolController) syncResourceQuotas( + ctx context.Context, + c client.Client, + quota *capsulev1beta2.ResourcePool, + namespaces []corev1.Namespace, +) (err error) { + group := new(errgroup.Group) + + for _, ns := range namespaces { + namespace := ns + + group.Go(func() error { + return r.syncResourceQuota(ctx, c, quota, namespace) + }) + } + + return group.Wait() +} + +// Synchronize a single resourcequota. 
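+// The ResourceQuota it creates or updates carries the pool type-label and takes
+// its spec.hard from GetResourceQuotaHardResources, i.e. the pool defaults plus
+// the resources claimed in that namespace. A rough illustration (hypothetical
+// numbers: a default of limits.cpu=0 and one bound claim of limits.cpu=2):
+//
+//	spec:
+//	  hard:
+//	    limits.cpu: "2"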
+func (r *resourcePoolController) syncResourceQuota( + ctx context.Context, + c client.Client, + pool *capsulev1beta2.ResourcePool, + namespace corev1.Namespace, +) (err error) { + // getting ResourceQuota labels for the mutateFn + var quotaLabel string + + if quotaLabel, err = utils.GetTypeLabel(&capsulev1beta2.ResourcePool{}); err != nil { + return err + } + + target := &corev1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: namespace.GetName(), + }, + } + + if err := c.Get(ctx, types.NamespacedName{Name: target.Name, Namespace: target.Namespace}, target); err != nil && !apierrors.IsNotFound(err) { + return err + } + + err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) { + _, retryErr = controllerutil.CreateOrUpdate(ctx, c, target, func() (err error) { + targetLabels := target.GetLabels() + if targetLabels == nil { + targetLabels = map[string]string{} + } + + targetLabels[quotaLabel] = pool.Name + + target.SetLabels(targetLabels) + target.Spec.Scopes = pool.Spec.Quota.Scopes + target.Spec.ScopeSelector = pool.Spec.Quota.ScopeSelector + + // Assign to resourcequota all the claims + defaults + target.Spec.Hard = pool.GetResourceQuotaHardResources(namespace.GetName()) + + return controllerutil.SetControllerReference(pool, target, c.Scheme()) + }) + + return retryErr + }) + if err != nil { + return err + } + + return nil +} + +// Handles new allocated resources before they are passed on to the pool itself. +// It does not verify the same stuff, as the admission for resourcepools. +func (r *resourcePoolController) handlePoolHardResources(pool *capsulev1beta2.ResourcePool) { + if &pool.Status.Allocation.Hard != &pool.Spec.Quota.Hard { + for resourceName := range pool.Status.Allocation.Hard { + if _, ok := pool.Spec.Quota.Hard[resourceName]; !ok { + r.metrics.DeleteResourcePoolSingleResourceMetric(pool.Name, resourceName.String()) + } + } + } + + pool.Status.Allocation.Hard = pool.Spec.Quota.Hard +} + +// Get Currently selected namespaces for the resourcepool. +func (r *resourcePoolController) gatherMatchingNamespaces( + ctx context.Context, + log logr.Logger, + pool *capsulev1beta2.ResourcePool, +) (namespaces []corev1.Namespace, err error) { + // Collect Namespaces (Matching) + namespaces = make([]corev1.Namespace, 0) + seenNamespaces := make(map[string]struct{}) + + if !pool.DeletionTimestamp.IsZero() { + return + } + + for _, selector := range pool.Spec.Selectors { + selected, serr := selector.GetMatchingNamespaces(ctx, r.Client) + if serr != nil { + log.Error(err, "Cannot get matching namespaces") + + continue + } + + for _, ns := range selected { + if !ns.DeletionTimestamp.IsZero() { + continue + } + + if _, exists := seenNamespaces[ns.Name]; exists { + continue + } + + seenNamespaces[ns.Name] = struct{}{} + + namespaces = append(namespaces, ns) + } + } + + return +} + +// Get Currently selected claims for the resourcepool. 
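+// The List call below relies on a field index on ".status.pool.uid" being
+// registered with the manager (presumably by pkg/indexer, which cmd/main.go
+// wires up). A minimal sketch of such a registration with controller-runtime:
+//
+//	_ = mgr.GetFieldIndexer().IndexField(ctx, &capsulev1beta2.ResourcePoolClaim{}, ".status.pool.uid",
+//		func(obj client.Object) []string {
+//			claim, ok := obj.(*capsulev1beta2.ResourcePoolClaim)
+//			if !ok {
+//				return nil
+//			}
+//
+//			return []string{string(claim.Status.Pool.UID)}
+//		})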
+func (r *resourcePoolController) gatherMatchingClaims( + ctx context.Context, + log logr.Logger, + pool *capsulev1beta2.ResourcePool, + namespaces map[string]struct{}, +) (claims []capsulev1beta2.ResourcePoolClaim, err error) { + if !pool.DeletionTimestamp.IsZero() { + return claims, err + } + + claimList := &capsulev1beta2.ResourcePoolClaimList{} + if err := r.List(ctx, claimList, client.MatchingFieldsSelector{ + Selector: fields.OneTermEqualSelector(".status.pool.uid", string(pool.GetUID())), + }); err != nil { + log.Error(err, "failed to list ResourceQuotaClaims") + + return claims, err + } + + filteredClaims := make([]capsulev1beta2.ResourcePoolClaim, 0) + + for _, claim := range claimList.Items { + if meta.ReleaseAnnotationTriggers(&claim) { + continue + } + + if _, ok := namespaces[claim.Namespace]; !ok { + continue + } + + filteredClaims = append(filteredClaims, claim) + } + + // Sort by creation timestamp (oldest first) + sort.Slice(filteredClaims, func(i, j int) bool { + a := filteredClaims[i] + b := filteredClaims[j] + + // First, sort by CreationTimestamp + if !a.CreationTimestamp.Equal(&b.CreationTimestamp) { + return a.CreationTimestamp.Before(&b.CreationTimestamp) + } + + // Tiebreaker: use name as a stable secondary sort - If CreationTimestamp is equal + // (e.g., when two claims are created at the same time in Gitops environments or CI/CD pipelines) + if a.Name != b.Name { + return a.Name < b.Name + } + + return a.Namespace < b.Namespace + }) + + return filteredClaims, nil +} + +// Attempts to garbage collect a ResourceQuota resource. +func (r *resourcePoolController) garbageCollection( + ctx context.Context, + log logr.Logger, + pool *capsulev1beta2.ResourcePool, + claims []capsulev1beta2.ResourcePoolClaim, + namespaces map[string]struct{}, +) error { + activeClaims := make(map[string]struct{}, len(claims)) + for _, claim := range claims { + activeClaims[string(claim.UID)] = struct{}{} + } + + log.V(5).Info("available items", "namespaces", namespaces, "claims", activeClaims) + + namespaceMarkedForGC := make(map[string]bool, len(pool.Status.Namespaces)) + + for _, ns := range pool.Status.Namespaces { + _, exists := namespaces[ns] + if !exists { + log.V(5).Info("garbage collecting namespace", "namespace", ns) + + namespaceMarkedForGC[ns] = true + + if err := r.garbageCollectNamespace(ctx, pool, ns); err != nil { + r.log.Error(err, "Failed to garbage collect resource quota", "namespace", ns) + + return err + } + } + } + + // Garbage collect namespaces which no longer match selector + for ns, clms := range pool.Status.Claims { + nsMarked := namespaceMarkedForGC[ns] + + for _, cl := range clms { + _, claimActive := activeClaims[string(cl.UID)] + + if nsMarked || !claimActive { + log.V(5).Info("Disassociating claim", "claim", cl.Name, "namespace", ns, "uid", cl.UID, "nsGC", nsMarked, "claimGC", claimActive) + + cl.Namespace = api.Name(ns) + if err := r.handleClaimDisassociation(ctx, log, pool, cl); err != nil { + r.log.Error(err, "Failed to disassociate claim", "namespace", ns, "uid", cl.UID) + + return err + } + } + } + + if nsMarked || len(pool.Status.Claims[ns]) == 0 { + delete(pool.Status.Claims, ns) + } + } + + // We can recalculate the usage in the end + // Since it's only going to decrease + pool.CalculateClaimedResources() + + return nil +} + +// Attempts to garbage collect a ResourceQuota resource. 
+func (r *resourcePoolController) garbageCollectNamespace( + ctx context.Context, + pool *capsulev1beta2.ResourcePool, + namespace string, +) error { + r.metrics.DeleteResourcePoolNamespaceMetric(pool.Name, namespace) + + // Check if the namespace still exists + ns := &corev1.Namespace{} + if err := r.Get(ctx, types.NamespacedName{Name: namespace}, ns); err != nil { + if apierrors.IsNotFound(err) { + r.log.V(5).Info("Namespace does not exist, skipping garbage collection", "namespace", namespace) + + return nil + } + + return fmt.Errorf("failed to check namespace existence: %w", err) + } + + name := utils.PoolResourceQuotaName(pool) + + // Attempt to delete the ResourceQuota + target := &corev1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := r.Get(ctx, types.NamespacedName{Namespace: namespace, Name: target.GetName()}, target) + if err != nil { + if apierrors.IsNotFound(err) { + r.log.V(5).Info("ResourceQuota already deleted", "namespace", namespace, "name", name) + + return nil + } + + return err + } + + // Delete the ResourceQuota + if err := r.Delete(ctx, target); err != nil { + return fmt.Errorf("failed to delete ResourceQuota %s in namespace %s: %w", name, namespace, err) + } + + return nil +} diff --git a/controllers/resourcepools/types.go b/controllers/resourcepools/types.go new file mode 100644 index 00000000..21cd3638 --- /dev/null +++ b/controllers/resourcepools/types.go @@ -0,0 +1,16 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resourcepools + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +type PoolExhaustion map[string]PoolExhaustionResource + +type PoolExhaustionResource struct { + Namespace bool + Available resource.Quantity + Requesting resource.Quantity +} diff --git a/controllers/tenant/manager.go b/controllers/tenant/manager.go index 1ccb3e8b..fec372c9 100644 --- a/controllers/tenant/manager.go +++ b/controllers/tenant/manager.go @@ -25,6 +25,7 @@ import ( type Manager struct { client.Client + Metrics *metrics.TenantRecorder Log logr.Logger Recorder record.EventRecorder RESTConfig *rest.Config @@ -51,8 +52,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct r.Log.Info("Request object not found, could have been deleted after reconcile request") // If tenant was deleted or cannot be found, clean up metrics - metrics.TenantResourceUsage.DeletePartialMatch(map[string]string{"tenant": request.Name}) - metrics.TenantResourceLimit.DeletePartialMatch(map[string]string{"tenant": request.Name}) + r.Metrics.DeleteTenantMetric(request.Name) return reconcile.Result{}, nil } diff --git a/controllers/tenant/resourcequotas.go b/controllers/tenant/resourcequotas.go index ec7abbc8..05b63eb7 100644 --- a/controllers/tenant/resourcequotas.go +++ b/controllers/tenant/resourcequotas.go @@ -23,7 +23,6 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" "github.com/projectcapsule/capsule/pkg/api" - "github.com/projectcapsule/capsule/pkg/metrics" "github.com/projectcapsule/capsule/pkg/utils" ) @@ -54,14 +53,13 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2 } // Remove prior metrics, to avoid cleaning up for metrics of deleted ResourceQuotas - metrics.TenantResourceUsage.DeletePartialMatch(map[string]string{"tenant": tenant.Name}) - metrics.TenantResourceLimit.DeletePartialMatch(map[string]string{"tenant": tenant.Name}) + r.Metrics.DeleteTenantMetric(tenant.Name) // Expose the 
namespace quota and usage as metrics for the tenant - metrics.TenantResourceUsage.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(tenant.Status.Size)) + r.Metrics.TenantResourceUsageGauge.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(tenant.Status.Size)) if tenant.Spec.NamespaceOptions != nil && tenant.Spec.NamespaceOptions.Quota != nil { - metrics.TenantResourceLimit.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(*tenant.Spec.NamespaceOptions.Quota)) + r.Metrics.TenantResourceLimitGauge.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(*tenant.Spec.NamespaceOptions.Quota)) } //nolint:nestif @@ -99,6 +97,7 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2 return scopeErr } + // Iterating over all the options declared for the ResourceQuota, // summing all the used quota across different Namespaces to determinate // if we're hitting a Hard quota at Tenant level. @@ -116,13 +115,13 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2 r.Log.Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String()) // Expose usage and limit metrics for the resource (name) of the ResourceQuota (index) - metrics.TenantResourceUsage.WithLabelValues( + r.Metrics.TenantResourceUsageGauge.WithLabelValues( tenant.Name, name.String(), strconv.Itoa(index), ).Set(float64(quantity.MilliValue()) / 1000) - metrics.TenantResourceLimit.WithLabelValues( + r.Metrics.TenantResourceLimitGauge.WithLabelValues( tenant.Name, name.String(), strconv.Itoa(index), diff --git a/e2e/additional_role_bindings_test.go b/e2e/additional_role_bindings_test.go index 2a192c6a..1e5ee1a0 100644 --- a/e2e/additional_role_bindings_test.go +++ b/e2e/additional_role_bindings_test.go @@ -16,7 +16,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a Namespace with an additional Role Binding", func() { +var _ = Describe("creating a Namespace with an additional Role Binding", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "additional-role-binding", diff --git a/e2e/allowed_external_ips_test.go b/e2e/allowed_external_ips_test.go index b9937a1b..9d8833b2 100644 --- a/e2e/allowed_external_ips_test.go +++ b/e2e/allowed_external_ips_test.go @@ -16,7 +16,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("enforcing an allowed set of Service external IPs", func() { +var _ = Describe("enforcing an allowed set of Service external IPs", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "allowed-external-ip", diff --git a/e2e/container_registry_test.go b/e2e/container_registry_test.go index c1aa7b12..7d8b7d82 100644 --- a/e2e/container_registry_test.go +++ b/e2e/container_registry_test.go @@ -23,7 +23,7 @@ type Patch struct { Value string `json:"value"` } -var _ = Describe("enforcing a Container Registry", func() { +var _ = Describe("enforcing a Container Registry", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "container-registry", diff --git a/e2e/custom_capsule_group_test.go b/e2e/custom_capsule_group_test.go index f4966949..fd5adb8c 100644 --- a/e2e/custom_capsule_group_test.go +++ b/e2e/custom_capsule_group_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace as Tenant owner with custom --capsule-group", func() { +var _ = 
Describe("creating a Namespace as Tenant owner with custom --capsule-group", Label("config"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-assigned-custom-group", diff --git a/e2e/custom_resource_quota_test.go b/e2e/custom_resource_quota_test.go index 4242d786..4968329d 100644 --- a/e2e/custom_resource_quota_test.go +++ b/e2e/custom_resource_quota_test.go @@ -21,7 +21,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("when Tenant limits custom Resource Quota", func() { +var _ = Describe("when Tenant limits custom Resource Quota", Label("resourcequota"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "limiting-resources", @@ -100,7 +100,7 @@ var _ = Describe("when Tenant limits custom Resource Quota", func() { dynamicClient := dynamic.NewForConfigOrDie(cfg) for _, i := range []int{1, 2, 3} { - ns := NewNamespace(fmt.Sprintf("resource-ns-%d", i)) + ns := NewNamespace(fmt.Sprintf("limiting-resources-ns-%d", i)) NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed()) TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName())) @@ -122,7 +122,7 @@ var _ = Describe("when Tenant limits custom Resource Quota", func() { } for _, i := range []int{1, 2, 3} { - ns := NewNamespace(fmt.Sprintf("resource-ns-%d", i)) + ns := NewNamespace(fmt.Sprintf("limiting-resources-ns-%d", i)) obj := &unstructured.Unstructured{ Object: map[string]interface{}{ diff --git a/e2e/disable_externalname_test.go b/e2e/disable_externalname_test.go index 3f93b39a..f0963009 100644 --- a/e2e/disable_externalname_test.go +++ b/e2e/disable_externalname_test.go @@ -17,7 +17,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating an ExternalName service when it is disabled for Tenant", func() { +var _ = Describe("creating an ExternalName service when it is disabled for Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "disable-external-service", diff --git a/e2e/disable_ingress_wildcard_test.go b/e2e/disable_ingress_wildcard_test.go index d00b8654..bdf68d0d 100644 --- a/e2e/disable_ingress_wildcard_test.go +++ b/e2e/disable_ingress_wildcard_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("creating an Ingress with a wildcard when it is denied for the Tenant", func() { +var _ = Describe("creating an Ingress with a wildcard when it is denied for the Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "denied-ingress-wildcard", diff --git a/e2e/disable_loadbalancer_test.go b/e2e/disable_loadbalancer_test.go index 2901d2a7..95d64083 100644 --- a/e2e/disable_loadbalancer_test.go +++ b/e2e/disable_loadbalancer_test.go @@ -17,7 +17,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a LoadBalancer service when it is disabled for Tenant", func() { +var _ = Describe("creating a LoadBalancer service when it is disabled for Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "disable-loadbalancer-service", diff --git a/e2e/disable_node_ports_test.go b/e2e/disable_node_ports_test.go index 145e8105..14df0aae 100644 --- a/e2e/disable_node_ports_test.go +++ b/e2e/disable_node_ports_test.go @@ -17,7 +17,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a nodePort service when 
it is disabled for Tenant", func() { +var _ = Describe("creating a nodePort service when it is disabled for Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "disable-node-ports", diff --git a/e2e/dynamic_tenant_owner_clusterroles_test.go b/e2e/dynamic_tenant_owner_clusterroles_test.go index 59f8de8f..cf6688df 100644 --- a/e2e/dynamic_tenant_owner_clusterroles_test.go +++ b/e2e/dynamic_tenant_owner_clusterroles_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("defining dynamic Tenant Owner Cluster Roles", func() { +var _ = Describe("defining dynamic Tenant Owner Cluster Roles", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "dynamic-tenant-owner-clusterroles", diff --git a/e2e/enable_loadbalancer_test.go b/e2e/enable_loadbalancer_test.go index c8fe9207..bcee59b7 100644 --- a/e2e/enable_loadbalancer_test.go +++ b/e2e/enable_loadbalancer_test.go @@ -17,7 +17,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a LoadBalancer service when it is enabled for Tenant", func() { +var _ = Describe("creating a LoadBalancer service when it is enabled for Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "enable-loadbalancer-service", diff --git a/e2e/enable_node_ports_test.go b/e2e/enable_node_ports_test.go index 411a07ba..720015e5 100644 --- a/e2e/enable_node_ports_test.go +++ b/e2e/enable_node_ports_test.go @@ -15,7 +15,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a nodePort service when it is enabled for Tenant", func() { +var _ = Describe("creating a nodePort service when it is enabled for Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "enable-node-ports", diff --git a/e2e/forbidden_annotations_regex_test.go b/e2e/forbidden_annotations_regex_test.go index ebf175f0..49761f7c 100644 --- a/e2e/forbidden_annotations_regex_test.go +++ b/e2e/forbidden_annotations_regex_test.go @@ -14,7 +14,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a tenant with various forbidden regexes", func() { +var _ = Describe("creating a tenant with various forbidden regexes", Label("tenant"), func() { //errorRegexes := []string{ // "(.*gitops|.*nsm).[k8s.io/((?!(resource)).*|trusted)](http://k8s.io/((?!(resource)).*%7Ctrusted))", //} diff --git a/e2e/force_tenant_prefix_tenant_scope_test.go b/e2e/force_tenant_prefix_tenant_scope_test.go index 5f95dcd3..3aaaae13 100644 --- a/e2e/force_tenant_prefix_tenant_scope_test.go +++ b/e2e/force_tenant_prefix_tenant_scope_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace with Tenant name prefix enforcement at Tenant scope", func() { +var _ = Describe("creating a Namespace with Tenant name prefix enforcement at Tenant scope", Label("tenant", "config"), func() { t1 := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "awesome", diff --git a/e2e/force_tenant_prefix_test.go b/e2e/force_tenant_prefix_test.go index 1088d7a2..85b477b4 100644 --- a/e2e/force_tenant_prefix_test.go +++ b/e2e/force_tenant_prefix_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace with Tenant name prefix enforcement", 
func() { +var _ = Describe("creating a Namespace with Tenant name prefix enforcement", Label("tenant"), func() { t1 := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "awesome", diff --git a/e2e/imagepullpolicy_multiple_test.go b/e2e/imagepullpolicy_multiple_test.go index 8ed78865..0d0eadfe 100644 --- a/e2e/imagepullpolicy_multiple_test.go +++ b/e2e/imagepullpolicy_multiple_test.go @@ -15,7 +15,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("enforcing some defined ImagePullPolicy", func() { +var _ = Describe("enforcing some defined ImagePullPolicy", Label("pod"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "image-pull-policies", diff --git a/e2e/imagepullpolicy_single_test.go b/e2e/imagepullpolicy_single_test.go index 2a099434..3477343b 100644 --- a/e2e/imagepullpolicy_single_test.go +++ b/e2e/imagepullpolicy_single_test.go @@ -15,7 +15,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("enforcing a defined ImagePullPolicy", func() { +var _ = Describe("enforcing a defined ImagePullPolicy", Label("pod"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "image-pull-policy", diff --git a/e2e/ingress_class_extensions_test.go b/e2e/ingress_class_extensions_test.go index 0b4e0f65..2a6abed4 100644 --- a/e2e/ingress_class_extensions_test.go +++ b/e2e/ingress_class_extensions_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when Tenant handles Ingress classes with extensions/v1beta1", func() { +var _ = Describe("when Tenant handles Ingress classes with extensions/v1beta1", Label("ingress"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "ingress-class-extensions-v1beta1", diff --git a/e2e/ingress_class_networking_test.go b/e2e/ingress_class_networking_test.go index fd7d6c36..a9b53514 100644 --- a/e2e/ingress_class_networking_test.go +++ b/e2e/ingress_class_networking_test.go @@ -24,7 +24,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when Tenant handles Ingress classes with networking.k8s.io/v1", func() { +var _ = Describe("when Tenant handles Ingress classes with networking.k8s.io/v1", Label("ingress"), func() { tntNoDefault := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "ic-selector-networking-v1", diff --git a/e2e/ingress_hostnames_collision_cluster_scope_test.go b/e2e/ingress_hostnames_collision_cluster_scope_test.go index 4cda7f67..256178fd 100644 --- a/e2e/ingress_hostnames_collision_cluster_scope_test.go +++ b/e2e/ingress_hostnames_collision_cluster_scope_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when handling Cluster scoped Ingress hostnames collision", func() { +var _ = Describe("when handling Cluster scoped Ingress hostnames collision", Label("ingress"), func() { tnt1 := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "hostnames-collision-cluster-one", diff --git a/e2e/ingress_hostnames_collision_disabled_test.go b/e2e/ingress_hostnames_collision_disabled_test.go index 5c0606b3..4777124f 100644 --- a/e2e/ingress_hostnames_collision_disabled_test.go +++ b/e2e/ingress_hostnames_collision_disabled_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when disabling Ingress hostnames collision", func() { +var _ = Describe("when disabling Ingress hostnames collision", Label("ingress"), func() { tnt := 
&capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "hostnames-collision-disabled", diff --git a/e2e/ingress_hostnames_collision_namespace_scope_test.go b/e2e/ingress_hostnames_collision_namespace_scope_test.go index c624b14d..ce6be35a 100644 --- a/e2e/ingress_hostnames_collision_namespace_scope_test.go +++ b/e2e/ingress_hostnames_collision_namespace_scope_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when handling Namespace scoped Ingress hostnames collision", func() { +var _ = Describe("when handling Namespace scoped Ingress hostnames collision", Label("ingress"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "hostnames-collision-namespace", diff --git a/e2e/ingress_hostnames_collision_tenant_scope_test.go b/e2e/ingress_hostnames_collision_tenant_scope_test.go index 3ad6f435..3a070c44 100644 --- a/e2e/ingress_hostnames_collision_tenant_scope_test.go +++ b/e2e/ingress_hostnames_collision_tenant_scope_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when handling Tenant scoped Ingress hostnames collision", func() { +var _ = Describe("when handling Tenant scoped Ingress hostnames collision", Label("ingress"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "hostnames-collision-tenant", diff --git a/e2e/ingress_hostnames_test.go b/e2e/ingress_hostnames_test.go index aab742e0..c694ddb4 100644 --- a/e2e/ingress_hostnames_test.go +++ b/e2e/ingress_hostnames_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("when Tenant handles Ingress hostnames", func() { +var _ = Describe("when Tenant handles Ingress hostnames", Label("ingress"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "ingress-hostnames", diff --git a/e2e/missing_tenant_test.go b/e2e/missing_tenant_test.go index 548563ac..de98dc0e 100644 --- a/e2e/missing_tenant_test.go +++ b/e2e/missing_tenant_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace creation with no Tenant assigned", func() { +var _ = Describe("creating a Namespace creation with no Tenant assigned", Label("tenant"), func() { It("should fail", func() { tnt := &capsulev1beta2.Tenant{ Spec: capsulev1beta2.TenantSpec{ diff --git a/e2e/namespace_additional_metadata_test.go b/e2e/namespace_additional_metadata_test.go index 65298ff7..75a394f6 100644 --- a/e2e/namespace_additional_metadata_test.go +++ b/e2e/namespace_additional_metadata_test.go @@ -15,7 +15,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a Namespace for a Tenant with additional metadata", func() { +var _ = Describe("creating a Namespace for a Tenant with additional metadata", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-metadata", diff --git a/e2e/namespace_capsule_label_test.go b/e2e/namespace_capsule_label_test.go index 8e1b8c3c..9e09238e 100644 --- a/e2e/namespace_capsule_label_test.go +++ b/e2e/namespace_capsule_label_test.go @@ -15,7 +15,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating several Namespaces for a Tenant", func() { +var _ = Describe("creating several Namespaces for a Tenant", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "capsule-labels", diff --git 
a/e2e/namespace_hijacking_test.go b/e2e/namespace_hijacking_test.go index 194a8a5e..e628b49b 100644 --- a/e2e/namespace_hijacking_test.go +++ b/e2e/namespace_hijacking_test.go @@ -17,7 +17,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var _ = Describe("creating several Namespaces for a Tenant", func() { +var _ = Describe("creating several Namespaces for a Tenant", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "capsule-ns-attack-1", diff --git a/e2e/namespace_metadata_controller_test.go b/e2e/namespace_metadata_controller_test.go index 39e99ab9..610826ae 100644 --- a/e2e/namespace_metadata_controller_test.go +++ b/e2e/namespace_metadata_controller_test.go @@ -1,5 +1,3 @@ -//go:build e2e - // Copyright 2020-2023 Project Capsule Authors. // SPDX-License-Identifier: Apache-2.0 @@ -7,6 +5,7 @@ package e2e import ( "context" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -16,7 +15,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a Namespace for a Tenant with additional metadata", func() { +var _ = Describe("creating a Namespace for a Tenant with additional metadata", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-metadata", @@ -92,4 +91,4 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", f }, defaultTimeoutInterval, defaultPollInterval).Should(BeTrue()) }) }) -}) \ No newline at end of file +}) diff --git a/e2e/namespace_metadata_webhook_test.go b/e2e/namespace_metadata_webhook_test.go index 1ecb0ec7..68c2d458 100644 --- a/e2e/namespace_metadata_webhook_test.go +++ b/e2e/namespace_metadata_webhook_test.go @@ -17,7 +17,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a Namespace for a Tenant with additional metadata", func() { +var _ = Describe("creating a Namespace for a Tenant with additional metadata", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-metadata", @@ -70,12 +70,12 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", f TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName())) By("checking additional labels", func() { - Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed()) + Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed()) - for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels { - Expect(ns.Labels).To(HaveKeyWithValue(k, v)) - } - return + for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels { + Expect(ns.Labels).To(HaveKeyWithValue(k, v)) + } + return }) By("checking additional annotations", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed()) diff --git a/e2e/namespace_user_metadata_test.go b/e2e/namespace_user_metadata_test.go index 3b6b3e3e..db76a015 100644 --- a/e2e/namespace_user_metadata_test.go +++ b/e2e/namespace_user_metadata_test.go @@ -17,7 +17,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a Namespace with user-specified labels and annotations", func() { +var _ = Describe("creating a Namespace with user-specified labels and annotations", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: 
"tenant-user-metadata-forbidden", diff --git a/e2e/new_namespace_test.go b/e2e/new_namespace_test.go index a90a9640..d3c77609 100644 --- a/e2e/new_namespace_test.go +++ b/e2e/new_namespace_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespaces as different type of Tenant owners", func() { +var _ = Describe("creating a Namespaces as different type of Tenant owners", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-assigned", diff --git a/e2e/node_user_metadata_test.go b/e2e/node_user_metadata_test.go index d9ef53c8..009c2b85 100644 --- a/e2e/node_user_metadata_test.go +++ b/e2e/node_user_metadata_test.go @@ -18,7 +18,7 @@ import ( "github.com/projectcapsule/capsule/pkg/webhook/utils" ) -var _ = Describe("modifying node labels and annotations", func() { +var _ = Describe("modifying node labels and annotations", Label("config", "nodes"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-node-user-metadata-forbidden", diff --git a/e2e/overquota_namespace_test.go b/e2e/overquota_namespace_test.go index 8a98451f..3f838d98 100644 --- a/e2e/overquota_namespace_test.go +++ b/e2e/overquota_namespace_test.go @@ -14,7 +14,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace in over-quota of three", func() { +var _ = Describe("creating a Namespace in over-quota of three", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "over-quota-tenant", diff --git a/e2e/owner_webhooks_test.go b/e2e/owner_webhooks_test.go index e473e5ef..e2e35e9d 100644 --- a/e2e/owner_webhooks_test.go +++ b/e2e/owner_webhooks_test.go @@ -19,7 +19,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("when Tenant owner interacts with the webhooks", func() { +var _ = Describe("when Tenant owner interacts with the webhooks", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-owner", diff --git a/e2e/pod_metadata_test.go b/e2e/pod_metadata_test.go index 39d35c9e..64873795 100644 --- a/e2e/pod_metadata_test.go +++ b/e2e/pod_metadata_test.go @@ -16,7 +16,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var _ = Describe("adding metadata to Pod objects", func() { +var _ = Describe("adding metadata to Pod objects", Label("pod"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-metadata", diff --git a/e2e/pod_priority_class_test.go b/e2e/pod_priority_class_test.go index d778652c..699fadd8 100644 --- a/e2e/pod_priority_class_test.go +++ b/e2e/pod_priority_class_test.go @@ -21,7 +21,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("enforcing a Priority Class", func() { +var _ = Describe("enforcing a Priority Class", Label("pod"), func() { tntWithDefaults := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "priority-class-defaults", diff --git a/e2e/pod_runtime_class_test.go b/e2e/pod_runtime_class_test.go index 0ffe189d..47b482fc 100644 --- a/e2e/pod_runtime_class_test.go +++ b/e2e/pod_runtime_class_test.go @@ -19,7 +19,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("enforcing a Runtime Class", func() { +var _ = Describe("enforcing a Runtime Class", Label("pod"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "runtime-class", diff 
--git a/e2e/preventing_pv_cross_tenant_mount_test.go b/e2e/preventing_pv_cross_tenant_mount_test.go index 30359f31..1699fb64 100644 --- a/e2e/preventing_pv_cross_tenant_mount_test.go +++ b/e2e/preventing_pv_cross_tenant_mount_test.go @@ -18,7 +18,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("preventing PersistentVolume cross-tenant mount", func() { +var _ = Describe("preventing PersistentVolume cross-tenant mount", Label("tenant", "storage"), func() { tnt1 := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "pv-one", diff --git a/e2e/protected_namespace_regex_test.go b/e2e/protected_namespace_regex_test.go index adc26acc..2cad2206 100644 --- a/e2e/protected_namespace_regex_test.go +++ b/e2e/protected_namespace_regex_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace with a protected Namespace regex enabled", func() { +var _ = Describe("creating a Namespace with a protected Namespace regex enabled", Label("namespace"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-protected-namespace", diff --git a/e2e/resource_quota_exceeded_test.go b/e2e/resource_quota_exceeded_test.go index 29a104d2..ad14dd8d 100644 --- a/e2e/resource_quota_exceeded_test.go +++ b/e2e/resource_quota_exceeded_test.go @@ -21,7 +21,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("exceeding a Tenant resource quota", func() { +var _ = Describe("exceeding a Tenant resource quota", Label("resourcequota"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-resources-changes", diff --git a/e2e/resourcepool_test.go b/e2e/resourcepool_test.go new file mode 100644 index 00000000..92b17f4b --- /dev/null +++ b/e2e/resourcepool_test.go @@ -0,0 +1,2026 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + "github.com/projectcapsule/capsule/pkg/api" + "github.com/projectcapsule/capsule/pkg/meta" + "github.com/projectcapsule/capsule/pkg/utils" +) + +var _ = Describe("ResourcePool Tests", Label("resourcepool"), func() { + JustAfterEach(func() { + Eventually(func() error { + poolList := &capsulev1beta2.TenantList{} + labelSelector := client.MatchingLabels{"e2e-resourcepool": "test"} + if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil { + return err + } + + for _, pool := range poolList.Items { + if err := k8sClient.Delete(context.TODO(), &pool); err != nil { + return err + } + } + + return nil + }, "30s", "5s").Should(Succeed()) + + Eventually(func() error { + poolList := &capsulev1beta2.ResourcePoolList{} + labelSelector := client.MatchingLabels{"e2e-resourcepool": "test"} + if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil { + return err + } + + for _, pool := range poolList.Items { + if err := k8sClient.Delete(context.TODO(), &pool); err != nil { + return err + } + } + + return nil + }, "30s", "5s").Should(Succeed()) + + Eventually(func() error { + poolList := &corev1.NamespaceList{} + labelSelector := client.MatchingLabels{"e2e-resourcepool": "test"} + if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil { + return err + } + + for _, pool := range poolList.Items { + if err := k8sClient.Delete(context.TODO(), &pool); err != nil { + return err + } + } + + return nil + }, "30s", "5s").Should(Succeed()) + + }) + + It("Assign Defaults correctly", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "defaults-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "defaults-pool", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "defaults-pool", + }, + }, + }, + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + namespaces := []string{"ns-1-default-pool", "ns-2-default-pool", "ns-3-default-pool"} + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool) + }) + + By("Get Applied revision", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + }) + + By("Has no Finalizer", func() { + Expect(controllerutil.ContainsFinalizer(pool, meta.ControllerFinalizer)).To(BeFalse()) + }) + + By("Verify Defaults were set", func() { + Expect(pool.Spec.Defaults).To(BeNil()) + }) + + By("Verify Status was correctly initialized", func() { + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: 
pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + }, + Available: pool.Spec.Quota.Hard, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Create Namespaces, which are selected by the pool", func() { + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-1-default-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "defaults-pool", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns1) + Expect(err).Should(Succeed()) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-default-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "defaults-pool", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + Expect(err).Should(Succeed()) + + ns3 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-3-default-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "defaults-pool", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns3) + Expect(err).Should(Succeed()) + }) + + By("Verify Namespaces are shown as allowed targets", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(namespaces, pool.Status.Namespaces) + Expect(ok).To(BeTrue(), "Mismatch for expected namespaces: %s", msg) + + Expect(pool.Status.NamespaceSize).To(Equal(uint(3))) + }) + + By("Verify ResourceQuotas for namespaces", func() { + quotaLabel, err := utils.GetTypeLabel(&capsulev1beta2.ResourcePool{}) + Expect(err).Should(Succeed()) + + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns}, + rq) + Expect(err).Should(Succeed()) + + Expect(rq.ObjectMeta.Labels[quotaLabel]).To(Equal(pool.Name), "Expected "+quotaLabel+" to be set to "+pool.Name) + + Expect(rq.Spec.Hard).To(BeNil()) + + found := false + for _, ref := range rq.OwnerReferences { + if ref.Kind == "ResourcePool" && ref.UID == pool.UID { + found = true + break + } + } + Expect(found).To(BeTrue(), "Expected ResourcePool to be owner of ResourceQuota in namespace %s", ns) + } + }) + + By("Add Claims for namespaces", func() { + claim1 := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-1-default-pool", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim1) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim1) + + isSuccessfullyBoundToPool(pool, claim1) + + claim2 := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-2", + Namespace: "ns-2-default-pool", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + }, + } + + err = k8sClient.Create(context.TODO(), claim2) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim2) + + 
isSuccessfullyBoundToPool(pool, claim2) + + claim3 := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-3", + Namespace: "ns-3-default-pool", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("10Gi"), + }, + }, + } + + err = k8sClient.Create(context.TODO(), claim3) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim3) + + Expect(isBoundToPool(pool, claim3)).To(BeFalse()) + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("640Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1408Mi"), + }, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Pool Has Finalizer", func() { + Expect(controllerutil.ContainsFinalizer(pool, meta.ControllerFinalizer)).To(BeTrue()) + }) + + By("Verify ResourceQuotas for namespaces", func() { + status := map[string]corev1.ResourceList{ + "ns-1-default-pool": corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + "ns-2-default-pool": corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + "ns-3-default-pool": nil, + } + + quotaLabel, err := utils.GetTypeLabel(&capsulev1beta2.ResourcePool{}) + Expect(err).Should(Succeed()) + + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns}, + rq) + Expect(err).Should(Succeed()) + + Expect(rq.ObjectMeta.Labels[quotaLabel]).To(Equal(pool.Name), "Expected "+quotaLabel+" to be set to "+pool.Name) + + ok, msg := DeepCompare(status[ns], rq.Spec.Hard) + Expect(ok).To(BeTrue(), "Mismatch for resources for resourcequota: %s", msg) + + found := false + for _, ref := range rq.OwnerReferences { + if ref.Kind == "ResourcePool" && ref.UID == pool.UID { + found = true + break + } + } + Expect(found).To(BeTrue(), "Expected ResourcePool to be owner of ResourceQuota in namespace %s", ns) + } + }) + + By("Update the ResourcePool", func() { + pool.Spec.Defaults = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsStorage: resource.MustParse("5Gi"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to update ResourcePool %s", pool) + }) + + By("Verify ResourceQuotas for namespaces", func() { + status := map[string]corev1.ResourceList{ + + "ns-1-default-pool": corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + corev1.ResourceLimitsMemory: resource.MustParse("1152Mi"), + 
corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsStorage: resource.MustParse("5Gi"), + }, + "ns-2-default-pool": corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + corev1.ResourceLimitsMemory: resource.MustParse("1536Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsStorage: resource.MustParse("5Gi"), + }, + "ns-3-default-pool": corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsStorage: resource.MustParse("5Gi"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns}, + rq) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(status[ns], rq.Spec.Hard) + Expect(ok).To(BeTrue(), "Mismatch for resources for resourcequota: %s", msg) + } + }) + + By("Remove namespace from being selected (Patch Labels)", func() { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-default-pool", + }, + } + + err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.Name}, ns) + Expect(err).Should(Succeed()) + + ns.ObjectMeta.Labels = map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "do-not-select", + } + + err = k8sClient.Update(context.TODO(), ns) + Expect(err).Should(Succeed()) + }) + + By("Verify Namespaces was removed as allowed targets", func() { + expected := []string{"ns-1-default-pool", "ns-3-default-pool"} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expected, pool.Status.Namespaces) + Expect(ok).To(BeTrue(), "Mismatch for expected namespaces: %s", msg) + + Expect(pool.Status.NamespaceSize).To(Equal(uint(2))) + Expect(pool.Status.ClaimSize).To(Equal(uint(1))) + }) + + By("Verify ResourceQuota was cleaned up", func() { + rq := &corev1.ResourceQuota{} + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: "ns-2-default-pool", + }, rq) + }, "30s", "1s").ShouldNot(Succeed(), "Expected ResourceQuota to be deleted from namespace %s", "ns-2-default-pool") + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1920Mi"), + }, + } + + err = k8sClient.Get(context.TODO(), 
client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Remove namespace from being selected (Delete Namespace)", func() { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-3-default-pool", + }, + } + + err := k8sClient.Delete(context.TODO(), ns) + Expect(err).Should(Succeed()) + }) + + By("Get Applied revision", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + }) + + By("Verify Namespaces was removed as allowed targets", func() { + expected := []string{"ns-1-default-pool"} + + ok, msg := DeepCompare(expected, pool.Status.Namespaces) + Expect(ok).To(BeTrue(), "Mismatch for expected namespaces: %s", msg) + + Expect(pool.Status.NamespaceSize).To(Equal(uint(1))) + }) + + By("Delete Resourcepool", func() { + err := k8sClient.Delete(context.TODO(), pool) + Expect(err).Should(Succeed()) + }) + + By("Ensure ResourceQuotas are cleaned up", func() { + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns, + }, rq) + }, "30s", "1s").ShouldNot(Succeed(), "Expected ResourceQuota to be deleted from namespace %s", ns) + } + }) + }) + + It("Assigns Defaults correctly (DefaultsZero)", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-defaults-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "no-defaults", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "no-defaults", + }, + }, + }, + }, + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + DefaultsAssignZero: ptr.To(true), + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + namespaces := []string{"ns-1-zero-pool", "ns-2-zero-pool"} + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool) + }) + + By("Get Applied revision", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + }) + + By("Verify Defaults are empty", func() { + expected := corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + } + + Expect(pool.Spec.Defaults).To(Equal(expected)) + }) + + By("Verify Status was correctly initialized", func() { + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + 
corev1.ResourceRequestsMemory: resource.MustParse("0"), + }, + Available: pool.Spec.Quota.Hard, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Create Namespaces, which are selected by the pool", func() { + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-1-zero-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "no-defaults", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns1) + Expect(err).Should(Succeed()) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-zero-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "no-defaults", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + Expect(err).Should(Succeed()) + }) + + By("Verify Namespaces are shown as allowed targets", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(namespaces, pool.Status.Namespaces) + Expect(ok).To(BeTrue(), "Mismatch for expected namespaces: %s", msg) + + Expect(pool.Status.NamespaceSize).To(Equal(uint(2))) + }) + + By("Verify ResourceQuotas for namespaces", func() { + resources := corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + } + + quotaLabel, err := utils.GetTypeLabel(&capsulev1beta2.ResourcePool{}) + Expect(err).Should(Succeed()) + + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns}, + rq) + Expect(err).Should(Succeed()) + + Expect(rq.ObjectMeta.Labels[quotaLabel]).To(Equal(pool.Name), "Expected "+quotaLabel+" to be set to "+pool.Name) + + ok, msg := DeepCompare(resources, rq.Spec.Hard) + Expect(ok).To(BeTrue(), "Mismatch for resources for resourcequota: %s", msg) + + found := false + for _, ref := range rq.OwnerReferences { + if ref.Kind == "ResourcePool" && ref.UID == pool.UID { + found = true + break + } + } + Expect(found).To(BeTrue(), "Expected ResourcePool to be owner of ResourceQuota in namespace %s", ns) + + } + }) + + By("Delete Resourcepool", func() { + err := k8sClient.Delete(context.TODO(), pool) + Expect(err).Should(Succeed()) + }) + + By("Ensure ResourceQuotas are cleaned up", func() { + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns, + }, rq) + }, "30s", "1s").ShouldNot(Succeed(), "Expected ResourceQuota to be deleted from namespace %s", ns) + } + }) + + }) + + It("ResourcePool Scheduling - Unordered", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unordered-scheduling", + Labels: map[string]string{ + "e2e-resourcepool": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "unordered-scheduling", + }, + }, + }, + }, + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + OrderedQueue: ptr.To(false), + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: 
corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool) + }) + + By("Create source namespaces", func() { + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-1-pool-unordered", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "unordered-scheduling", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns1) + Expect(err).Should(Succeed(), "Failed to create Namespace %s", ns1) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-pool-unordered", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "unordered-scheduling", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + Expect(err).Should(Succeed(), "Failed to create Namespace %s", ns2) + + }) + + By("Create claim for limits.memory", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-1-pool-unordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1920Mi"), + }, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Verify ResourceQuota", func() { + rqHardResources := corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + } + + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: "ns-1-pool-unordered"}, + rq) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(rqHardResources, rq.Spec.Hard) + Expect(ok).To(BeTrue(), "Mismatch for resources for resourcequota: %s", msg) + }) + + By("Create claim exhausting requests.cpu", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-2", + Namespace: "ns-1-pool-unordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("4"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), 
"Failed to create Claim %s", claim) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Message).To(Equal("requested: requests.cpu=4, available: requests.cpu=2"), "Actual message"+claim.Status.Condition.Message) + Expect(claim.Status.Condition.Reason).To(Equal(meta.PoolExhaustedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Create claim for request.memory", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-3", + Namespace: "ns-2-pool-unordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1920Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + }, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Create claim for requests.cpu (skip exhausting one)", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-4", + Namespace: "ns-2-pool-unordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("2"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1920Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + }, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + 
By("Reverify claim exhausting requests.cpu", func() { + claim := &capsulev1beta2.ResourcePoolClaim{} + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: "simple-2", Namespace: "ns-1-pool-unordered"}, claim) + Expect(err).Should(Succeed()) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Message).To(Equal("requested: requests.cpu=4, available: requests.cpu=0"), "Actual message"+claim.Status.Condition.Message) + Expect(claim.Status.Condition.Reason).To(Equal(meta.PoolExhaustedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + }) + + It("ResourcePool Scheduling - Ordered", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ordered-scheduling", + Labels: map[string]string{ + "e2e-resourcepool": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "ordered-scheduling", + }, + }, + }, + }, + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + OrderedQueue: ptr.To(true), + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool) + }) + + By("Create source namespaces", func() { + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-1-pool-ordered", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "ordered-scheduling", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns1) + Expect(err).Should(Succeed(), "Failed to create Namespace %s", ns1) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-pool-ordered", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "ordered-scheduling", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + Expect(err).Should(Succeed(), "Failed to create Namespace %s", ns2) + + }) + + By("Create claim for limits.memory", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-1-pool-ordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Create claim for requests.requests", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-2-pool-ordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("750Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim 
%s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("750Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("1298Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1536Mi"), + }, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Create claim exhausting requests.cpu", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-2", + Namespace: "ns-2-pool-ordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("4"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Message).To(Equal("requested: requests.cpu=4, available: requests.cpu=2"), "Actual message"+claim.Status.Condition.Message) + Expect(claim.Status.Condition.Reason).To(Equal(meta.PoolExhaustedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Create claim exhausting limits.cpu", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-3", + Namespace: "ns-1-pool-ordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("4"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Message).To(Equal("requested: limits.cpu=4, available: limits.cpu=2"), "Actual message"+claim.Status.Condition.Message) + Expect(claim.Status.Condition.Reason).To(Equal(meta.PoolExhaustedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Create claim for requests.cpu (attempt to skip exhausting one)", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-4", + Namespace: "ns-2-pool-ordered", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + }, + }, + } + + err := 
k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Message).To(Equal("requested: limits.cpu=2, queued: limits.cpu=4; requested: requests.cpu=2, queued: requests.cpu=4"), "Actual message"+claim.Status.Condition.Message) + Expect(claim.Status.Condition.Reason).To(Equal(meta.QueueExhaustedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Verify ResourceQuotas for namespaces", func() { + namespaces := []string{"ns-1-pool-ordered", "ns-2-pool-ordered"} + + status := map[string]corev1.ResourceList{ + + "ns-1-pool-ordered": corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + "ns-2-pool-ordered": corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("750Mi"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns}, + rq) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(status[ns], rq.Spec.Hard) + Expect(ok).To(BeTrue(), "Mismatch for resources for resourcequota: %s", msg) + } + }) + + By("Allocate more resources to Resourcepool (requests.cpu)", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("4"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("4"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to allocate Resourcepool %s", pool) + }) + + By("Verify Status was correctly initialized", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + expected := &capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: pool.Spec.Quota.Hard, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("4"), + corev1.ResourceRequestsMemory: resource.MustParse("750Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("4"), + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("1298Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("1536Mi"), + }, + } + + ok, msg := DeepCompare(*expected, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for expected status allocation: %s", msg) + }) + + By("Verify queued claim can be allocated", func() { + claim := &capsulev1beta2.ResourcePoolClaim{} + + err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: "simple-2", Namespace: "ns-2-pool-ordered"}, claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Verify queued claim can be allocated", func() { + claim := &capsulev1beta2.ResourcePoolClaim{} + + err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: "simple-3", 
Namespace: "ns-1-pool-ordered"}, claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + }) + + By("Verify ResourceQuotas for namespaces", func() { + namespaces := []string{"ns-1-pool-ordered", "ns-2-pool-ordered"} + status := map[string]corev1.ResourceList{ + "ns-1-pool-ordered": corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("512Mi"), + corev1.ResourceLimitsCPU: resource.MustParse("4"), + }, + "ns-2-pool-ordered": corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("750Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("4"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + for _, ns := range namespaces { + rq := &corev1.ResourceQuota{} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{ + Name: utils.PoolResourceQuotaName(pool), + Namespace: ns}, + rq) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(status[ns], rq.Spec.Hard) + Expect(ok).To(BeTrue(), "Mismatch for resources for resourcequota: %s", msg) + } + }) + + By("Verify moved up in queue", func() { + claim := &capsulev1beta2.ResourcePoolClaim{} + + err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: "simple-4", Namespace: "ns-2-pool-ordered"}, claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Message).To(Equal("requested: limits.cpu=2, available: limits.cpu=0; requested: requests.cpu=2, available: requests.cpu=0"), "Actual message "+claim.Status.Condition.Message) + Expect(claim.Status.Condition.Reason).To(Equal(meta.PoolExhaustedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + }) + + It("ResourcePool - Namespace Selection", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bind-ns-pool-1", + Labels: map[string]string{ + "e2e-resourcepool": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "bind-namespaces", + }, + }, + }, + }, + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + DeleteBoundResources: ptr.To(false), + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool) + }) + + By("Create source namespaces", func() { + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-1-pool-bind", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "bind-namespaces", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns1) + Expect(err).Should(Succeed(), "Failed to create Namespace %s", ns1) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-pool-bind", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "bind-namespaces-no", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + 
Expect(err).Should(Succeed(), "Failed to create Namespace %s", ns2) + }) + + By("Verify only matching namespaces", func() { + expected := []string{"ns-1-pool-bind"} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expected, pool.Status.Namespaces) + Expect(ok).To(BeTrue(), "Mismatch for expected namespaces: %s", msg) + + Expect(pool.Status.NamespaceSize).To(Equal(uint(1))) + }) + + By("Create claim in matching namespace", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-1-pool-bind", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Reason).To(Equal(meta.SucceededReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionTrue)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Create claim non matching namespace", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-2-pool-bind", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + Pool: "bind-ns-pool-1", + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("128Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + Expect(isBoundToPool(pool, claim)).To(BeFalse()) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Reason).To(Equal(meta.FailedReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(claim.Status.Condition.Type).To(Equal(meta.AssignedCondition)) + }) + + By("Update Namespace Labels to become matching", func() { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-pool-bind", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "bind-namespaces", + }, + }, + } + + err := k8sClient.Update(context.TODO(), ns) + Expect(err).Should(Succeed(), "Failed to update namespace %s", ns) + }) + + By("Reverify claim in namespace", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-2-pool-bind", + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + isSuccessfullyBoundToPool(pool, claim) + }) + + }) + + It("ResourcePool Deletion - Not Cascading", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deletion-pool-1", + Labels: map[string]string{ + "e2e-resourcepool": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "delete-bound-resources", + }, + }, + }, + }, + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + 
DeleteBoundResources: ptr.To(false),
+        },
+        Quota: corev1.ResourceQuotaSpec{
+          Hard: corev1.ResourceList{
+            corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
+          },
+        },
+      },
+    }
+
+    By("Create the ResourcePool", func() {
+      err := k8sClient.Create(context.TODO(), pool)
+      Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool)
+    })
+
+    By("Unbinding From Pool", func() {
+      claim1 := &capsulev1beta2.ResourcePoolClaim{
+        ObjectMeta: metav1.ObjectMeta{
+          Name:      "delete-1",
+          Namespace: "ns-1-pool-no-deletion",
+        },
+        Spec: capsulev1beta2.ResourcePoolClaimSpec{
+          ResourceClaims: corev1.ResourceList{
+            corev1.ResourceLimitsMemory: resource.MustParse("128Mi"),
+          },
+        },
+      }
+
+      claim2 := &capsulev1beta2.ResourcePoolClaim{
+        ObjectMeta: metav1.ObjectMeta{
+          Name:      "delete-2",
+          Namespace: "ns-2-pool-no-deletion",
+        },
+        Spec: capsulev1beta2.ResourcePoolClaimSpec{
+          ResourceClaims: corev1.ResourceList{
+            corev1.ResourceLimitsMemory: resource.MustParse("128Mi"),
+          },
+        },
+      }
+
+      ns1 := &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: claim1.Namespace,
+          Labels: map[string]string{
+            "e2e-resourcepool":          "test",
+            "capsule.clastix.io/tenant": "delete-bound-resources",
+          },
+        },
+      }
+
+      ns2 := &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: claim2.Namespace,
+          Labels: map[string]string{
+            "e2e-resourcepool":          "test",
+            "capsule.clastix.io/tenant": "delete-bound-resources",
+          },
+        },
+      }
+
+      err := k8sClient.Create(context.TODO(), ns1)
+      Expect(err).Should(Succeed())
+      err = k8sClient.Create(context.TODO(), ns2)
+      Expect(err).Should(Succeed())
+      err = k8sClient.Create(context.TODO(), claim1)
+      Expect(err).Should(Succeed(), "Failed to create Claim %s", claim1)
+      err = k8sClient.Create(context.TODO(), claim2)
+      Expect(err).Should(Succeed(), "Failed to create Claim %s", claim2)
+
+      isBoundToPool(pool, claim1)
+      isBoundToPool(pool, claim2)
+
+      err = k8sClient.Delete(context.TODO(), pool)
+      Expect(err).Should(Succeed(), "Failed to delete Pool %s", pool)
+
+      Eventually(func() error {
+        return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(claim1), &capsulev1beta2.ResourcePoolClaim{})
+      }).Should(Succeed(), "Expected claim1 to be present")
+
+      Eventually(func() error {
+        return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(claim2), &capsulev1beta2.ResourcePoolClaim{})
+      }).Should(Succeed(), "Expected claim2 to be present")
+
+      Eventually(func() error {
+        return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(pool), &capsulev1beta2.ResourcePool{})
+      }).ShouldNot(Succeed(), "Expected pool to be gone")
+    })
+  })
+
+  It("ResourcePool Deletion - Cascading (DeleteBoundResources)", func() {
+    pool := &capsulev1beta2.ResourcePool{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: "deletion-pool",
+        Labels: map[string]string{
+          "e2e-resourcepool": "test",
+        },
+      },
+      Spec: capsulev1beta2.ResourcePoolSpec{
+        Selectors: []api.NamespaceSelector{
+          {
+            LabelSelector: &metav1.LabelSelector{
+              MatchLabels: map[string]string{
+                "capsule.clastix.io/tenant": "delete-bound-resources",
+              },
+            },
+          },
+        },
+        Config: capsulev1beta2.ResourcePoolSpecConfiguration{
+          DeleteBoundResources: ptr.To(true),
+        },
+        Quota: corev1.ResourceQuotaSpec{
+          Hard: corev1.ResourceList{
+            corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
+          },
+        },
+      },
+    }
+
+    By("Create the ResourcePool", func() {
+      err := k8sClient.Create(context.TODO(), pool)
+      Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool)
+    })
+
+    By("Cascading Deletion", func() {
+      claim1 := &capsulev1beta2.ResourcePoolClaim{
+        ObjectMeta: metav1.ObjectMeta{
+          Name:      "delete-1",
+          Namespace: "ns-1-pool-deletion",
+        },
+        Spec: capsulev1beta2.ResourcePoolClaimSpec{
+          ResourceClaims: corev1.ResourceList{
+            corev1.ResourceLimitsMemory: resource.MustParse("128Mi"),
+          },
+        },
+      }
+
+      claim2 := &capsulev1beta2.ResourcePoolClaim{
+        ObjectMeta: metav1.ObjectMeta{
+          Name:      "delete-2",
+          Namespace: "ns-2-pool-deletion",
+        },
+        Spec: capsulev1beta2.ResourcePoolClaimSpec{
+          ResourceClaims: corev1.ResourceList{
+            corev1.ResourceLimitsMemory: resource.MustParse("128Mi"),
+          },
+        },
+      }
+
+      ns1 := &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: claim1.Namespace,
+          Labels: map[string]string{
+            "e2e-resourcepool":          "test",
+            "capsule.clastix.io/tenant": "delete-bound-resources",
+          },
+        },
+      }
+
+      ns2 := &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: claim2.Namespace,
+          Labels: map[string]string{
+            "e2e-resourcepool":          "test",
+            "capsule.clastix.io/tenant": "delete-bound-resources",
+          },
+        },
+      }
+
+      err := k8sClient.Create(context.TODO(), ns1)
+      Expect(err).Should(Succeed())
+      err = k8sClient.Create(context.TODO(), ns2)
+      Expect(err).Should(Succeed())
+      err = k8sClient.Create(context.TODO(), claim1)
+      Expect(err).Should(Succeed(), "Failed to create Claim %s", claim1)
+      err = k8sClient.Create(context.TODO(), claim2)
+      Expect(err).Should(Succeed(), "Failed to create Claim %s", claim2)
+
+      isBoundToPool(pool, claim1)
+      isBoundToPool(pool, claim2)
+
+      err = k8sClient.Delete(context.TODO(), pool)
+      Expect(err).Should(Succeed(), "Failed to delete Pool %s", pool)
+
+      Eventually(func() error {
+        return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(claim1), &capsulev1beta2.ResourcePoolClaim{})
+      }).ShouldNot(Succeed(), "Expected claim1 to be gone")
+
+      Eventually(func() error {
+        return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(claim2), &capsulev1beta2.ResourcePoolClaim{})
+      }).ShouldNot(Succeed(), "Expected claim2 to be gone")
+
+      Eventually(func() error {
+        return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(pool), &capsulev1beta2.ResourcePool{})
+      }).ShouldNot(Succeed(), "Expected pool to be gone")
+    })
+  })
+
+  It("Admission Guards", func() {
+    pool := &capsulev1beta2.ResourcePool{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: "admission-pool",
+        Labels: map[string]string{
+          "e2e-resourcepool": "test",
+        },
+      },
+      Spec: capsulev1beta2.ResourcePoolSpec{
+        Selectors: []api.NamespaceSelector{
+          {
+            LabelSelector: &metav1.LabelSelector{
+              MatchLabels: map[string]string{
+                "capsule.clastix.io/tenant": "admission",
+              },
+            },
+          },
+        },
+        Config: capsulev1beta2.ResourcePoolSpecConfiguration{
+          DefaultsAssignZero: ptr.To(true),
+        },
+        Quota: corev1.ResourceQuotaSpec{
+          Hard: corev1.ResourceList{
+            corev1.ResourceLimitsCPU:      resource.MustParse("2"),
+            corev1.ResourceLimitsMemory:   resource.MustParse("2Gi"),
+            corev1.ResourceRequestsCPU:    resource.MustParse("2"),
+            corev1.ResourceRequestsMemory: resource.MustParse("2Gi"),
+          },
+        },
+      },
+    }
+
+    By("Create the ResourcePool", func() {
+      err := k8sClient.Create(context.TODO(), pool)
+      Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool)
+    })
+
+    By("Create Namespaces, which are selected by the pool", func() {
+      ns1 := &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: "ns-1-admission-pool",
+          Labels: map[string]string{
+            "e2e-resourcepool":          "test",
+            "capsule.clastix.io/tenant": "admission",
+          },
+        },
+      }
+
+      err := k8sClient.Create(context.TODO(), ns1)
+
Expect(err).Should(Succeed()) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-admission-pool", + Labels: map[string]string{ + "e2e-resourcepool": "test", + "capsule.clastix.io/tenant": "admission", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + Expect(err).Should(Succeed()) + }) + + By("Create claims", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-1", + Namespace: "ns-1-admission-pool", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Reason).To(Equal(meta.SucceededReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionTrue)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Create claims", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple-2", + Namespace: "ns-2-admission-pool", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + isSuccessfullyBoundToPool(pool, claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Status.Condition.Reason).To(Equal(meta.SucceededReason)) + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionTrue)) + Expect(claim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + }) + + By("Verify ResourcePool Status Allocation", func() { + expectedAllocation := capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("1536Mi"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expectedAllocation, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for resource allocation: %s", msg) + }) + + By("Allow increasing the size of the pool", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("4"), + corev1.ResourceLimitsMemory: resource.MustParse("4Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + } + 
+ err := k8sClient.Update(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to update ResourcePool %s", pool) + }) + + By("Verify ResourcePool Status Allocation", func() { + expectedAllocation := capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("4"), + corev1.ResourceLimitsMemory: resource.MustParse("4Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("4"), + corev1.ResourceLimitsMemory: resource.MustParse("3Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("1536Mi"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expectedAllocation, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for resource allocation: %s", msg) + }) + + By("Allow Decreasing", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to update ResourcePool %s", pool) + }) + + By("Verify ResourcePool Status Allocation", func() { + expectedAllocation := capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("1536Mi"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expectedAllocation, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for resource allocation: %s", msg) + }) + + By("Don't allow Decreasing under claimed usage", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("10Mi"), + corev1.ResourceRequestsCPU: resource.MustParse("0.5"), + corev1.ResourceRequestsMemory: resource.MustParse("128Mi"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).ShouldNot(Succeed(), "Update to ResourcePool %s should be blocked", pool) + }) + + By("May Remove unused resources", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsMemory: 
resource.MustParse("2Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to update ResourcePool %s", pool) + }) + + By("Verify ResourcePool Status Allocation", func() { + expectedAllocation := capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("1536Mi"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expectedAllocation, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for resource allocation: %s", msg) + }) + + By("May Decrase to actual usage", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to update ResourcePool %s", pool) + }) + + By("Verify ResourcePool Status Allocation", func() { + expectedAllocation := capsulev1beta2.ResourcePoolQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + Claimed: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsMemory: resource.MustParse("512Mi"), + }, + Available: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + }, + } + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + + ok, msg := DeepCompare(expectedAllocation, pool.Status.Allocation) + Expect(ok).To(BeTrue(), "Mismatch for resource allocation: %s", msg) + }) + + By("May not set 0 on usage", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).ShouldNot(Succeed(), "Update to ResourcePool %s should be blocked", pool) + }) + + By("May not remove resource in use", func() { + pool.Spec.Quota.Hard = corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), + } + + err := k8sClient.Update(context.TODO(), pool) + Expect(err).ShouldNot(Succeed(), "Update to ResourcePool %s should be blocked", pool) + }) + + }) +}) + +func isSuccessfullyBoundToPool(pool *capsulev1beta2.ResourcePool, claim *capsulev1beta2.ResourcePoolClaim) { + fetchedPool := &capsulev1beta2.ResourcePool{} + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, fetchedPool) + Expect(err).Should(Succeed()) + + fetchedClaim := &capsulev1beta2.ResourcePoolClaim{} + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, fetchedClaim) + Expect(err).Should(Succeed()) + + isBoundToPool(fetchedPool, fetchedClaim) + + Expect(fetchedClaim.Status.Pool.Name.String()).To(Equal(fetchedPool.Name)) + 
Expect(fetchedClaim.Status.Pool.UID).To(Equal(fetchedPool.GetUID())) + + Expect(fetchedClaim.Status.Condition.Type).To(Equal(meta.BoundCondition)) + Expect(fetchedClaim.Status.Condition.Status).To(Equal(metav1.ConditionTrue)) + Expect(fetchedClaim.Status.Condition.Reason).To(Equal(meta.SucceededReason)) +} + +func isBoundToPool(pool *capsulev1beta2.ResourcePool, claim *capsulev1beta2.ResourcePoolClaim) bool { + fetchedPool := &capsulev1beta2.ResourcePool{} + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, fetchedPool) + Expect(err).Should(Succeed()) + + fetchedClaim := &capsulev1beta2.ResourcePoolClaim{} + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, fetchedClaim) + Expect(err).Should(Succeed()) + + status := fetchedPool.GetClaimFromStatus(fetchedClaim) + if status == nil { + return false + } + + for name, cl := range status.Claims { + Expect(cl).To(Equal(fetchedClaim.Spec.ResourceClaims[name])) + } + + return true +} diff --git a/e2e/resourcepoolclaim_test.go b/e2e/resourcepoolclaim_test.go new file mode 100644 index 00000000..842dbb5d --- /dev/null +++ b/e2e/resourcepoolclaim_test.go @@ -0,0 +1,667 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + "github.com/projectcapsule/capsule/pkg/api" + "github.com/projectcapsule/capsule/pkg/meta" +) + +var _ = Describe("ResourcePoolClaim Tests", Label("resourcepool"), func() { + _ = &capsulev1beta2.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-claims-1", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + }, + }, + Spec: capsulev1beta2.TenantSpec{ + Owners: capsulev1beta2.OwnerListSpec{ + { + Name: "wind-user", + Kind: "User", + }, + }, + }, + } + + JustAfterEach(func() { + Eventually(func() error { + poolList := &capsulev1beta2.TenantList{} + labelSelector := client.MatchingLabels{"e2e-resourcepoolclaims": "test"} + if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil { + return err + } + + for _, pool := range poolList.Items { + if err := k8sClient.Delete(context.TODO(), &pool); err != nil { + return err + } + } + + return nil + }, "30s", "5s").Should(Succeed()) + + Eventually(func() error { + poolList := &capsulev1beta2.ResourcePoolList{} + labelSelector := client.MatchingLabels{"e2e-resourcepoolclaims": "test"} + if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil { + return err + } + + for _, pool := range poolList.Items { + if err := k8sClient.Delete(context.TODO(), &pool); err != nil { + return err + } + } + + return nil + }, "30s", "5s").Should(Succeed()) + + Eventually(func() error { + poolList := &corev1.NamespaceList{} + labelSelector := client.MatchingLabels{"e2e-resourcepoolclaims": "test"} + if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil { + return err + } + + for _, pool := range poolList.Items { + if err := k8sClient.Delete(context.TODO(), &pool); err != nil { + return err + } + } + + return nil + }, "30s", "5s").Should(Succeed()) + + }) + + It("Claim to Pool Assignment", func() { + pool1 := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "test-binding-claims", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "claims-bindings", + }, + }, + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "claims-bindings-2", + }, + }, + }, + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + claim1 := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "assign-pool-claim-1", + Namespace: "ns-1-pool-assign", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + Pool: "test-binding-claims", + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + }, + }, + } + + claim2 := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "assign-pool-claim-2", + Namespace: "ns-2-pool-assign", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + Pool: "test-binding-claims", + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + }, + }, + } + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool1) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool1) + }) + + By("Get Applied revision", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool1.Name}, pool1) + Expect(err).Should(Succeed()) + }) + + By("Create Namespaces, which are selected by the pool", func() { + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-1-pool-assign", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "claims-bindings", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns1) + Expect(err).Should(Succeed()) + + ns2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-2-pool-assign", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "claims-bindings-2", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns2) + Expect(err).Should(Succeed()) + + ns3 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns-3-pool-assign", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "something-else", + }, + }, + } + + err = k8sClient.Create(context.TODO(), ns3) + Expect(err).Should(Succeed()) + }) + + By("Verify Namespaces are shown as allowed targets", func() { + expectedNamespaces := []string{"ns-1-pool-assign", "ns-2-pool-assign"} + + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool1.Name}, pool1) + Expect(err).Should(Succeed()) + + Expect(pool1.Status.Namespaces).To(Equal(expectedNamespaces)) + Expect(pool1.Status.NamespaceSize).To(Equal(uint(2))) + }) + + By("Create a first claim and verify binding", func() { + + err := 
k8sClient.Create(context.TODO(), claim1) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim1) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim1.Name, Namespace: claim1.Namespace}, claim1) + Expect(err).Should(Succeed()) + + isSuccessfullyBoundToPool(pool1, claim1) + + expectedPool := api.StatusNameUID{ + Name: api.Name(pool1.Name), + UID: pool1.GetUID(), + } + Expect(claim1.Status.Pool).To(Equal(expectedPool), "expected pool name to match") + Expect(claim1.Status.Condition.Status).To(Equal(metav1.ConditionTrue), "failed to verify condition status") + Expect(claim1.Status.Condition.Type).To(Equal(meta.BoundCondition), "failed to verify condition type") + Expect(claim1.Status.Condition.Reason).To(Equal(meta.SucceededReason), "failed to verify condition reason") + }) + + By("Create a second claim and verify binding", func() { + err := k8sClient.Create(context.TODO(), claim2) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim2) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim2.Name, Namespace: claim2.Namespace}, claim2) + Expect(err).Should(Succeed()) + + isSuccessfullyBoundToPool(pool1, claim2) + + expectedPool := api.StatusNameUID{ + Name: api.Name(pool1.Name), + UID: pool1.GetUID(), + } + Expect(claim2.Status.Pool).To(Equal(expectedPool), "expected pool name to match") + Expect(claim2.Status.Condition.Status).To(Equal(metav1.ConditionTrue), "failed to verify condition status") + Expect(claim2.Status.Condition.Type).To(Equal(meta.BoundCondition), "failed to verify condition type") + Expect(claim2.Status.Condition.Reason).To(Equal(meta.SucceededReason), "failed to verify condition reason") + }) + + By("Create a third claim and verify error", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "assign-pool-claim-3", + Namespace: "ns-3-pool-assign", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + Pool: "test-binding-claims", + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0"), + }, + }, + } + + err := k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + expectedPool := api.StatusNameUID{} + Expect(claim.Status.Pool).To(Equal(expectedPool), "expected pool name to be empty") + + Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse), "failed to verify condition status") + Expect(claim.Status.Condition.Type).To(Equal(meta.AssignedCondition), "failed to verify condition type") + Expect(claim.Status.Condition.Reason).To(Equal(meta.FailedReason), "failed to verify condition reason") + }) + }) + + It("Admission (Validation) - Patch Guard", func() { + pool := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-admission-claims", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + DeleteBoundResources: ptr.To(false), + }, + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "admission-guards", + }, + }, + }, + }, + Quota: 
corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + }, + }, + }, + } + + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admission-pool-claim-1", + Namespace: "ns-1-pool-admission", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + Pool: pool.GetName(), + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + corev1.ResourceLimitsMemory: resource.MustParse("1Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceRequestsMemory: resource.MustParse("1Gi"), + }, + }, + } + + By("Create the Claim", func() { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: claim.Namespace, + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "admission-guards", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns) + Expect(err).Should(Succeed()) + + err = k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + }) + + By("Create the ResourcePool", func() { + err := k8sClient.Create(context.TODO(), pool) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool) + }) + + By("Get Applied revision", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool) + Expect(err).Should(Succeed()) + }) + + By("Bind a claim", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + expectedPool := api.StatusNameUID{ + Name: api.Name(pool.Name), + UID: pool.GetUID(), + } + + isBoundCondition(claim) + Expect(claim.Status.Pool).To(Equal(expectedPool), "expected pool name to match") + }) + + By("Error on patching resources for claim (Increase)", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + claim.Spec.ResourceClaims = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + } + + err = k8sClient.Update(context.TODO(), claim) + Expect(err).ShouldNot(Succeed(), "Expected error when updating resources in bound state %s", claim) + }) + + By("Error on patching resources for claim (Decrease)", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + claim.Spec.ResourceClaims = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0Gi"), + } + + err = k8sClient.Update(context.TODO(), claim) + Expect(err).ShouldNot(Succeed(), "Expected error when updating resources in bound state %s", claim) + }) + + By("Error on patching pool name", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + claim.Spec.Pool = "some-random-pool" + + err = k8sClient.Update(context.TODO(), claim) + 
Expect(err).ShouldNot(Succeed(), "Expected error when updating resources in bound state %s", claim) + }) + + By("Delete Pool", func() { + err := k8sClient.Delete(context.TODO(), pool) + Expect(err).Should(Succeed()) + }) + + By("Verify claim is no longer bound", func() { + isUnassignedCondition(claim) + }) + + By("Allow patching resources for claim (Increase)", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + claim.Spec.ResourceClaims = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceLimitsMemory: resource.MustParse("2Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2Gi"), + } + + err = k8sClient.Update(context.TODO(), claim) + Expect(err).Should(Succeed(), "Expected error when updating resources in bound state %s", claim) + }) + + By("Allow patching resources for claim (Decrease)", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + claim.Spec.ResourceClaims = corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("0"), + corev1.ResourceLimitsMemory: resource.MustParse("0Gi"), + corev1.ResourceRequestsCPU: resource.MustParse("0"), + corev1.ResourceRequestsMemory: resource.MustParse("0Gi"), + } + + err = k8sClient.Update(context.TODO(), claim) + Expect(err).Should(Succeed(), "Expected error when updating resources in bound state %s", claim) + }) + + By("Allow patching pool name", func() { + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + claim.Spec.Pool = "some-random-pool" + + err = k8sClient.Update(context.TODO(), claim) + Expect(err).Should(Succeed(), "Expected no error when updating resources in bound state %s", claim) + }) + + }) + + It("Admission (Mutation) - Auto Pool Assign", func() { + pool1 := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-auto-assign-1", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + DeleteBoundResources: ptr.To(false), + }, + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "admission-auto-assign", + }, + }, + }, + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourceRequestsCPU: resource.MustParse("2"), + }, + }, + }, + } + + pool2 := &capsulev1beta2.ResourcePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-auto-assign-2", + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + }, + }, + Spec: capsulev1beta2.ResourcePoolSpec{ + Config: capsulev1beta2.ResourcePoolSpecConfiguration{ + DeleteBoundResources: ptr.To(false), + }, + Selectors: []api.NamespaceSelector{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "capsule.clastix.io/tenant": "admission-auto-assign", + }, + }, + }, + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("2"), + corev1.ResourceRequestsMemory: resource.MustParse("2"), + }, + }, + }, + } + + By("Create the ResourcePools", func() { + err := k8sClient.Create(context.TODO(), pool1) + 
Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool1) + + err = k8sClient.Create(context.TODO(), pool2) + Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool2) + }) + + By("Auto Assign Claim (CPU)", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auto-assign-1", + Namespace: "ns-1-pool-assign", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsCPU: resource.MustParse("1"), + corev1.ResourceRequestsCPU: resource.MustParse("1"), + }, + }, + } + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: claim.Namespace, + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "admission-auto-assign", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns) + Expect(err).Should(Succeed()) + + err = k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Spec.Pool).To(Equal(pool1.Name), "expected pool name to match") + }) + + By("Auto Assign Claim (Memory)", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auto-assign-1", + Namespace: "ns-2-pool-assign", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceLimitsMemory: resource.MustParse("1"), + corev1.ResourceRequestsMemory: resource.MustParse("1"), + }, + }, + } + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: claim.Namespace, + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "admission-auto-assign", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns) + Expect(err).Should(Succeed()) + + err = k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Spec.Pool).To(Equal(pool2.Name), "expected pool name to match") + }) + + By("No Default available (Storage)", func() { + claim := &capsulev1beta2.ResourcePoolClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auto-assign-3", + Namespace: "ns-3-pool-assign", + }, + Spec: capsulev1beta2.ResourcePoolClaimSpec{ + ResourceClaims: corev1.ResourceList{ + corev1.ResourceRequestsStorage: resource.MustParse("1"), + }, + }, + } + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: claim.Namespace, + Labels: map[string]string{ + "e2e-resourcepoolclaims": "test", + "capsule.clastix.io/tenant": "admission-auto-assign", + }, + }, + } + + err := k8sClient.Create(context.TODO(), ns) + Expect(err).Should(Succeed()) + + err = k8sClient.Create(context.TODO(), claim) + Expect(err).Should(Succeed(), "Failed to create Claim %s", claim) + + err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim) + Expect(err).Should(Succeed()) + + Expect(claim.Spec.Pool).To(Equal(""), "expected pool name to match") + }) + + }) + +}) + +func isUnassignedCondition(claim *capsulev1beta2.ResourcePoolClaim) { + cl := &capsulev1beta2.ResourcePoolClaim{} + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, cl) + Expect(err).Should(Succeed()) 
+ + Expect(cl.Status.Condition.Status).To(Equal(metav1.ConditionFalse), "failed to verify condition status") + Expect(cl.Status.Condition.Type).To(Equal(meta.AssignedCondition), "failed to verify condition type") + Expect(cl.Status.Condition.Reason).To(Equal(meta.FailedReason), "failed to verify condition reason") +} + +func isBoundCondition(claim *capsulev1beta2.ResourcePoolClaim) { + cl := &capsulev1beta2.ResourcePoolClaim{} + err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, cl) + Expect(err).Should(Succeed()) + + Expect(cl.Status.Condition.Status).To(Equal(metav1.ConditionTrue), "failed to verify condition status") + Expect(cl.Status.Condition.Type).To(Equal(meta.BoundCondition), "failed to verify condition type") + Expect(cl.Status.Condition.Reason).To(Equal(meta.SucceededReason), "failed to verify condition reason") +} diff --git a/e2e/sa_prevent_privilege_escalation_test.go b/e2e/sa_prevent_privilege_escalation_test.go index 3b92c23e..1270b831 100644 --- a/e2e/sa_prevent_privilege_escalation_test.go +++ b/e2e/sa_prevent_privilege_escalation_test.go @@ -20,7 +20,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("trying to escalate from a Tenant Namespace ServiceAccount", func() { +var _ = Describe("trying to escalate from a Tenant Namespace ServiceAccount", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "sa-privilege-escalation", diff --git a/e2e/selecting_non_owned_tenant_test.go b/e2e/selecting_non_owned_tenant_test.go index d966613a..dc388544 100644 --- a/e2e/selecting_non_owned_tenant_test.go +++ b/e2e/selecting_non_owned_tenant_test.go @@ -16,7 +16,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace trying to select a third Tenant", func() { +var _ = Describe("creating a Namespace trying to select a third Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-non-owned", diff --git a/e2e/selecting_tenant_fail_test.go b/e2e/selecting_tenant_fail_test.go index df0b2cbf..ed8d3d15 100644 --- a/e2e/selecting_tenant_fail_test.go +++ b/e2e/selecting_tenant_fail_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace without a Tenant selector when user owns multiple Tenants", func() { +var _ = Describe("creating a Namespace without a Tenant selector when user owns multiple Tenants", Label("tenant"), func() { t1 := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-one", diff --git a/e2e/selecting_tenant_with_label_test.go b/e2e/selecting_tenant_with_label_test.go index a2a9d78f..69647d2c 100644 --- a/e2e/selecting_tenant_with_label_test.go +++ b/e2e/selecting_tenant_with_label_test.go @@ -15,7 +15,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Namespace with Tenant selector when user owns multiple tenants", func() { +var _ = Describe("creating a Namespace with Tenant selector when user owns multiple tenants", Label("tenant"), func() { t1 := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-one", diff --git a/e2e/service_forbidden_metadata_test.go b/e2e/service_forbidden_metadata_test.go index aa59088b..952941a7 100644 --- a/e2e/service_forbidden_metadata_test.go +++ b/e2e/service_forbidden_metadata_test.go @@ -16,7 +16,7 @@ import ( 
"github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("creating a Service with user-specified labels and annotations", func() { +var _ = Describe("creating a Service with user-specified labels and annotations", Label("tenant", "service"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-user-metadata-forbidden", diff --git a/e2e/service_metadata_test.go b/e2e/service_metadata_test.go index 93c4d2e1..160ca420 100644 --- a/e2e/service_metadata_test.go +++ b/e2e/service_metadata_test.go @@ -23,7 +23,7 @@ import ( "github.com/projectcapsule/capsule/pkg/utils" ) -var _ = Describe("adding metadata to Service objects", func() { +var _ = Describe("adding metadata to Service objects", Label("tenant", "service"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "service-metadata", diff --git a/e2e/storage_class_test.go b/e2e/storage_class_test.go index 94f9edbc..c76520fb 100644 --- a/e2e/storage_class_test.go +++ b/e2e/storage_class_test.go @@ -26,7 +26,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("when Tenant handles Storage classes", func() { +var _ = Describe("when Tenant handles Storage classes", Label("tenant", "storage"), func() { tntNoDefaults := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "storage-class-selector", diff --git a/e2e/tenant_cordoning_test.go b/e2e/tenant_cordoning_test.go index b7cc2c5d..a65cdd5e 100644 --- a/e2e/tenant_cordoning_test.go +++ b/e2e/tenant_cordoning_test.go @@ -5,9 +5,10 @@ package e2e import ( "context" - "github.com/projectcapsule/capsule/pkg/utils" "time" + "github.com/projectcapsule/capsule/pkg/utils" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -17,7 +18,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("cordoning a Tenant", func() { +var _ = Describe("cordoning a Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-cordoning", diff --git a/e2e/tenant_metadata_test.go b/e2e/tenant_metadata_test.go index 099c2025..fe31c5c5 100644 --- a/e2e/tenant_metadata_test.go +++ b/e2e/tenant_metadata_test.go @@ -23,7 +23,7 @@ func getLabels(tnt capsulev1beta2.Tenant) (map[string]string, error) { return current.GetLabels(), nil } -var _ = Describe("adding metadata to a Tenant", func() { +var _ = Describe("adding metadata to a Tenant", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-metadata", diff --git a/e2e/tenant_name_webhook_test.go b/e2e/tenant_name_webhook_test.go index a2838fdb..f7bc0569 100644 --- a/e2e/tenant_name_webhook_test.go +++ b/e2e/tenant_name_webhook_test.go @@ -13,7 +13,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating a Tenant with wrong name", func() { +var _ = Describe("creating a Tenant with wrong name", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "non_rfc_dns_1123", diff --git a/e2e/tenant_protected_webhook_test.go b/e2e/tenant_protected_webhook_test.go index e6b4870f..3637805d 100644 --- a/e2e/tenant_protected_webhook_test.go +++ b/e2e/tenant_protected_webhook_test.go @@ -14,7 +14,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("Deleting a tenant with protected annotation", func() { +var _ = Describe("Deleting a tenant with protected annotation", Label("tenant"), func() { 
tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "protected-tenant", diff --git a/e2e/tenant_resources_changes_test.go b/e2e/tenant_resources_changes_test.go index d0a12109..37ddd4e4 100644 --- a/e2e/tenant_resources_changes_test.go +++ b/e2e/tenant_resources_changes_test.go @@ -21,7 +21,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("changing Tenant managed Kubernetes resources", func() { +var _ = Describe("changing Tenant managed Kubernetes resources", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-resources-changes", diff --git a/e2e/tenant_resources_test.go b/e2e/tenant_resources_test.go index e4e72f94..498493ff 100644 --- a/e2e/tenant_resources_test.go +++ b/e2e/tenant_resources_test.go @@ -21,7 +21,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" ) -var _ = Describe("creating namespaces within a Tenant with resources", func() { +var _ = Describe("creating namespaces within a Tenant with resources", Label("tenant"), func() { tnt := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "tenant-resources", diff --git a/e2e/tenantresource_test.go b/e2e/tenantresource_test.go index 9e0fbe02..c24f57de 100644 --- a/e2e/tenantresource_test.go +++ b/e2e/tenantresource_test.go @@ -24,7 +24,7 @@ import ( "github.com/projectcapsule/capsule/pkg/api" ) -var _ = Describe("Creating a TenantResource object", func() { +var _ = Describe("Creating a TenantResource object", Label("tenantresource"), func() { solar := &capsulev1beta2.Tenant{ ObjectMeta: metav1.ObjectMeta{ Name: "energy-solar", diff --git a/e2e/utils_test.go b/e2e/utils_test.go index 765998b1..3e7688ea 100644 --- a/e2e/utils_test.go +++ b/e2e/utils_test.go @@ -6,6 +6,7 @@ package e2e import ( "context" "fmt" + "reflect" "strings" "time" @@ -13,6 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" . "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -172,3 +174,57 @@ func GetKubernetesVersion() *versionUtil.Version { return ver } + +func DeepCompare(expected, actual interface{}) (bool, string) { + expVal := reflect.ValueOf(expected) + actVal := reflect.ValueOf(actual) + + // If the kinds differ, they are not equal. + if expVal.Kind() != actVal.Kind() { + return false, fmt.Sprintf("kind mismatch: %v vs %v", expVal.Kind(), actVal.Kind()) + } + + switch expVal.Kind() { + case reflect.Slice, reflect.Array: + // Convert slices to []interface{} for ElementsMatch. + expSlice := make([]interface{}, expVal.Len()) + actSlice := make([]interface{}, actVal.Len()) + for i := 0; i < expVal.Len(); i++ { + expSlice[i] = expVal.Index(i).Interface() + } + for i := 0; i < actVal.Len(); i++ { + actSlice[i] = actVal.Index(i).Interface() + } + // Use a dummy tester to capture error messages. + dummy := &dummyT{} + if !assert.ElementsMatch(dummy, expSlice, actSlice) { + return false, fmt.Sprintf("slice mismatch: %v", dummy.errors) + } + return true, "" + case reflect.Struct: + // Iterate over fields and compare recursively. + for i := 0; i < expVal.NumField(); i++ { + fieldName := expVal.Type().Field(i).Name + ok, msg := DeepCompare(expVal.Field(i).Interface(), actVal.Field(i).Interface()) + if !ok { + return false, fmt.Sprintf("field %s mismatch: %s", fieldName, msg) + } + } + return true, "" + default: + // Fallback to reflect.DeepEqual for other types. 
+ if !reflect.DeepEqual(expected, actual) { + return false, fmt.Sprintf("expected %v but got %v", expected, actual) + } + return true, "" + } +} + +// dummyT implements a minimal TestingT for testify. +type dummyT struct { + errors []string +} + +func (d *dummyT) Errorf(format string, args ...interface{}) { + d.errors = append(d.errors, fmt.Sprintf(format, args...)) +} diff --git a/pkg/api/selectors.go b/pkg/api/selectors.go new file mode 100644 index 00000000..e3a20b05 --- /dev/null +++ b/pkg/api/selectors.go @@ -0,0 +1,49 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Selector for resources and their labels or selecting origin namespaces +// +kubebuilder:object:generate=true +type NamespaceSelector struct { + // Select Items based on their labels. If the namespaceSelector is also set, the selector is applied + // to items within the selected namespaces. Otherwise for all the items. + *metav1.LabelSelector `json:",inline"` +} + +// GetMatchingNamespaces retrieves the list of namespaces that match the NamespaceSelector. +func (s *NamespaceSelector) GetMatchingNamespaces(ctx context.Context, client client.Client) ([]corev1.Namespace, error) { + if s.LabelSelector == nil { + return nil, nil // No namespace selector means all namespaces + } + + nsSelector, err := metav1.LabelSelectorAsSelector(s.LabelSelector) + if err != nil { + return nil, fmt.Errorf("invalid namespace selector: %w", err) + } + + namespaceList := &corev1.NamespaceList{} + if err := client.List(ctx, namespaceList); err != nil { + return nil, fmt.Errorf("failed to list namespaces: %w", err) + } + + var matchingNamespaces []corev1.Namespace + + for _, ns := range namespaceList.Items { + if nsSelector.Matches(labels.Set(ns.Labels)) { + matchingNamespaces = append(matchingNamespaces, ns) + } + } + + return matchingNamespaces, nil +} diff --git a/pkg/api/status.go b/pkg/api/status.go new file mode 100644 index 00000000..04c7fea8 --- /dev/null +++ b/pkg/api/status.go @@ -0,0 +1,32 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + k8stypes "k8s.io/apimachinery/pkg/types" +) + +// Name must be unique within a namespace. Is required when creating resources, although +// some resources may allow a client to request the generation of an appropriate name +// automatically. Name is primarily intended for creation idempotence and configuration +// definition. +// Cannot be updated. 
+// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names +// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$` +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:object:generate=true +type Name string + +func (n Name) String() string { + return string(n) +} + +type StatusNameUID struct { + // Name + Name Name `json:"name,omitempty"` + // Namespace + Namespace Name `json:"namespace,omitempty"` + // UID of the tracked Tenant to pin point tracking + k8stypes.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid"` +} diff --git a/pkg/api/zz_generated.deepcopy.go b/pkg/api/zz_generated.deepcopy.go index 540cfe62..0c7ec15e 100644 --- a/pkg/api/zz_generated.deepcopy.go +++ b/pkg/api/zz_generated.deepcopy.go @@ -225,6 +225,26 @@ func (in *LimitRangesSpec) DeepCopy() *LimitRangesSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector. +func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { + if in == nil { + return nil + } + out := new(NamespaceSelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { *out = *in diff --git a/pkg/indexer/indexer.go b/pkg/indexer/indexer.go index 9d7b0fa8..ea36a622 100644 --- a/pkg/indexer/indexer.go +++ b/pkg/indexer/indexer.go @@ -17,6 +17,7 @@ import ( capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" "github.com/projectcapsule/capsule/pkg/indexer/ingress" "github.com/projectcapsule/capsule/pkg/indexer/namespace" + "github.com/projectcapsule/capsule/pkg/indexer/resourcepool" "github.com/projectcapsule/capsule/pkg/indexer/tenant" "github.com/projectcapsule/capsule/pkg/indexer/tenantresource" "github.com/projectcapsule/capsule/pkg/utils" @@ -31,6 +32,8 @@ type CustomIndexer interface { func AddToManager(ctx context.Context, log logr.Logger, mgr manager.Manager) error { indexers := []CustomIndexer{ tenant.NamespacesReference{Obj: &capsulev1beta2.Tenant{}}, + resourcepool.NamespacesReference{Obj: &capsulev1beta2.ResourcePool{}}, + resourcepool.PoolUIDReference{Obj: &capsulev1beta2.ResourcePoolClaim{}}, tenant.OwnerReference{}, namespace.OwnerReference{}, ingress.HostnamePath{Obj: &extensionsv1beta1.Ingress{}}, diff --git a/pkg/indexer/resourcepool/claim.go b/pkg/indexer/resourcepool/claim.go new file mode 100644 index 00000000..3f6c58c9 --- /dev/null +++ b/pkg/indexer/resourcepool/claim.go @@ -0,0 +1,33 @@ +// Copyright 2020-2023 Project Capsule Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package resourcepool + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" +) + +type PoolUIDReference struct { + Obj client.Object +} + +func (o PoolUIDReference) Object() client.Object { + return o.Obj +} + +func (o PoolUIDReference) Field() string { + return ".status.pool.uid" +} + +func (o PoolUIDReference) Func() client.IndexerFunc { + return func(object client.Object) []string { + grq, ok := object.(*capsulev1beta2.ResourcePoolClaim) + if !ok { + return nil + } + + return []string{string(grq.Status.Pool.UID)} + } +} diff --git a/pkg/indexer/resourcepool/namespaces.go b/pkg/indexer/resourcepool/namespaces.go new file mode 100644 index 00000000..aaf1c7df --- /dev/null +++ b/pkg/indexer/resourcepool/namespaces.go @@ -0,0 +1,34 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resourcepool + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" +) + +// NamespacesReference defines the indexer logic for GlobalResourceQuota namespaces. +type NamespacesReference struct { + Obj client.Object +} + +func (o NamespacesReference) Object() client.Object { + return o.Obj +} + +func (o NamespacesReference) Field() string { + return ".status.namespaces" +} + +func (o NamespacesReference) Func() client.IndexerFunc { + return func(object client.Object) []string { + rp, ok := object.(*capsulev1beta2.ResourcePool) + if !ok { + return nil + } + + return rp.Status.Namespaces + } +} diff --git a/pkg/meta/annotations.go b/pkg/meta/annotations.go new file mode 100644 index 00000000..93849c6a --- /dev/null +++ b/pkg/meta/annotations.go @@ -0,0 +1,45 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package meta + +import ( + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + ReleaseAnnotation = "projectcapsule.dev/release" + ReleaseAnnotationTrigger = "true" +) + +func ReleaseAnnotationTriggers(obj client.Object) bool { + return annotationTriggers(obj, ReleaseAnnotation, ReleaseAnnotationTrigger) +} + +func ReleaseAnnotationRemove(obj client.Object) { + annotationRemove(obj, ReleaseAnnotation) +} + +func annotationRemove(obj client.Object, anno string) { + annotations := obj.GetAnnotations() + + if _, ok := annotations[anno]; ok { + delete(annotations, anno) + + obj.SetAnnotations(annotations) + } +} + +func annotationTriggers(obj client.Object, anno string, trigger string) bool { + annotations := obj.GetAnnotations() + + if val, ok := annotations[anno]; ok { + if strings.ToLower(val) == trigger { + return true + } + } + + return false +} diff --git a/pkg/meta/conditions.go b/pkg/meta/conditions.go new file mode 100644 index 00000000..9c695a65 --- /dev/null +++ b/pkg/meta/conditions.go @@ -0,0 +1,41 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package meta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // ReadyCondition indicates the resource is ready and fully reconciled. + ReadyCondition string = "Ready" + NotReadyCondition string = "NotReady" + + AssignedCondition string = "Assigned" + BoundCondition string = "Bound" + + // FailedReason indicates a condition or event observed a failure (Claim Rejected). 
+ SucceededReason string = "Succeeded" + FailedReason string = "Failed" + PoolExhaustedReason string = "PoolExhausted" + QueueExhaustedReason string = "QueueExhausted" + NamespaceExhaustedReason string = "NamespaceExhausted" +) + +func NewBoundCondition(obj client.Object) metav1.Condition { + return metav1.Condition{ + Type: BoundCondition, + ObservedGeneration: obj.GetGeneration(), + LastTransitionTime: metav1.Now(), + } +} + +func NewAssignedCondition(obj client.Object) metav1.Condition { + return metav1.Condition{ + Type: AssignedCondition, + ObservedGeneration: obj.GetGeneration(), + LastTransitionTime: metav1.Now(), + } +} diff --git a/pkg/meta/finalizers.go b/pkg/meta/finalizers.go new file mode 100644 index 00000000..9e951fcb --- /dev/null +++ b/pkg/meta/finalizers.go @@ -0,0 +1,8 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package meta + +const ( + ControllerFinalizer = "controller.projectcapsule.dev/finalize" +) diff --git a/pkg/meta/labels.go b/pkg/meta/labels.go new file mode 100644 index 00000000..a1ee6677 --- /dev/null +++ b/pkg/meta/labels.go @@ -0,0 +1,45 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package meta + +import ( + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + FreezeLabel = "projectcapsule.dev/freeze" + FreezeLabelTrigger = "true" +) + +func FreezeLabelTriggers(obj client.Object) bool { + return labelTriggers(obj, FreezeLabel, FreezeLabelTrigger) +} + +func FreezeLabelRemove(obj client.Object) { + labelRemove(obj, FreezeLabel) +} + +func labelRemove(obj client.Object, anno string) { + annotations := obj.GetLabels() + + if _, ok := annotations[anno]; ok { + delete(annotations, anno) + + obj.SetLabels(annotations) + } +} + +func labelTriggers(obj client.Object, anno string, trigger string) bool { + annotations := obj.GetLabels() + + if val, ok := annotations[anno]; ok { + if strings.ToLower(val) == trigger { + return true + } + } + + return false +} diff --git a/pkg/meta/ownerreference.go b/pkg/meta/ownerreference.go new file mode 100644 index 00000000..0e6703a6 --- /dev/null +++ b/pkg/meta/ownerreference.go @@ -0,0 +1,69 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package meta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// Adds an ownerreferences, which does not delete the object when the owner is deleted. +func SetLooseOwnerReference( + obj client.Object, + owner client.Object, + schema *runtime.Scheme, +) (err error) { + err = controllerutil.SetOwnerReference(owner, obj, schema) + if err != nil { + return err + } + + ownerRefs := obj.GetOwnerReferences() + for i, ownerRef := range ownerRefs { + if ownerRef.UID == owner.GetUID() { + if ownerRef.BlockOwnerDeletion != nil || ownerRef.Controller != nil { + ownerRefs[i].BlockOwnerDeletion = nil + ownerRefs[i].Controller = nil + } + + break + } + } + + return nil +} + +// Removes a Loose Ownerreference based on UID. +func RemoveLooseOwnerReference( + obj client.Object, + owner client.Object, +) { + refs := []metav1.OwnerReference{} + + for _, ownerRef := range obj.GetOwnerReferences() { + if ownerRef.UID == owner.GetUID() { + continue + } + + refs = append(refs, ownerRef) + } + + obj.SetOwnerReferences(refs) +} + +// If not returns false. 
+func HasLooseOwnerReference( + obj client.Object, + owner client.Object, +) bool { + for _, ownerRef := range obj.GetOwnerReferences() { + if ownerRef.UID == owner.GetUID() { + return true + } + } + + return false +} diff --git a/pkg/meta/ownerreference_test.go b/pkg/meta/ownerreference_test.go new file mode 100644 index 00000000..6fa9d623 --- /dev/null +++ b/pkg/meta/ownerreference_test.go @@ -0,0 +1,60 @@ +package meta + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "github.com/stretchr/testify/assert" +) + +func TestLooseOwnerReferenceHelpers(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + owner := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "owner", + Namespace: "default", + UID: types.UID("owner-uid"), + }, + } + + target := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "target", + Namespace: "default", + }, + } + + t.Run("SetLooseOwnerReference adds and clears controller fields", func(t *testing.T) { + err := SetLooseOwnerReference(target, owner, scheme) + assert.NoError(t, err) + + refs := target.GetOwnerReferences() + assert.Len(t, refs, 1) + ref := refs[0] + assert.Equal(t, owner.UID, ref.UID) + assert.Nil(t, ref.BlockOwnerDeletion) + assert.Nil(t, ref.Controller) + }) + + t.Run("HasLooseOwnerReference returns true if present", func(t *testing.T) { + result := HasLooseOwnerReference(target, owner) + assert.True(t, result) + }) + + t.Run("RemoveLooseOwnerReference removes the reference", func(t *testing.T) { + RemoveLooseOwnerReference(target, owner) + refs := target.GetOwnerReferences() + assert.Len(t, refs, 0) + }) + + t.Run("HasLooseOwnerReference returns false if not present", func(t *testing.T) { + result := HasLooseOwnerReference(target, owner) + assert.False(t, result) + }) +} diff --git a/pkg/metrics/claim_recorder.go b/pkg/metrics/claim_recorder.go new file mode 100644 index 00000000..bd0cabdc --- /dev/null +++ b/pkg/metrics/claim_recorder.go @@ -0,0 +1,85 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + "github.com/projectcapsule/capsule/pkg/meta" +) + +type ClaimRecorder struct { + claimConditionGauge *prometheus.GaugeVec + claimResourcesGauge *prometheus.GaugeVec +} + +func MustMakeClaimRecorder() *ClaimRecorder { + metricsRecorder := NewClaimRecorder() + crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) + + return metricsRecorder +} + +func NewClaimRecorder() *ClaimRecorder { + return &ClaimRecorder{ + claimConditionGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "claim_condition", + Help: "The current condition status of a claim.", + }, + []string{"name", "target_namespace", "condition", "status", "reason", "pool"}, + ), + claimResourcesGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "claim_resource", + Help: "The given amount of resources from the claim", + }, + []string{"name", "target_namespace", "resource"}, + ), + } +} + +func (r *ClaimRecorder) Collectors() []prometheus.Collector { + return []prometheus.Collector{ + r.claimConditionGauge, + } +} + +// RecordCondition records the condition as given for the ref. 
+func (r *ClaimRecorder) RecordClaimCondition(claim *capsulev1beta2.ResourcePoolClaim) { + for _, status := range []string{meta.AssignedCondition, meta.BoundCondition} { + var value float64 + if status == claim.Status.Condition.Type { + value = 1 + } + + r.claimConditionGauge.WithLabelValues( + claim.Name, + claim.Namespace, + status, + string(claim.Status.Condition.Status), + claim.Status.Condition.Reason, + claim.Status.Pool.Name.String(), + ).Set(value) + } + + for resourceName, qt := range claim.Spec.ResourceClaims { + r.claimResourcesGauge.WithLabelValues( + claim.Name, + claim.Namespace, + resourceName.String(), + ).Set(float64(qt.MilliValue()) / 1000) + } +} + +// DeleteCondition deletes the condition metrics for the ref. +func (r *ClaimRecorder) DeleteClaimMetric(claim string) { + for _, status := range []string{meta.ReadyCondition, meta.NotReadyCondition} { + r.claimConditionGauge.DeleteLabelValues(claim, status) + } +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go deleted file mode 100644 index b9b06044..00000000 --- a/pkg/metrics/metrics.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020-2023 Project Capsule Authors. -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" - "sigs.k8s.io/controller-runtime/pkg/metrics" -) - -var ( - metricsPrefix = "capsule_" - - TenantResourceUsage = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: metricsPrefix + "tenant_resource_usage", - Help: "Current resource usage for a given resource in a tenant", - }, []string{"tenant", "resource", "resourcequotaindex"}) - - TenantResourceLimit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: metricsPrefix + "tenant_resource_limit", - Help: "Current resource limit for a given resource in a tenant", - }, []string{"tenant", "resource", "resourcequotaindex"}) -) - -func init() { - metrics.Registry.MustRegister( - TenantResourceUsage, - TenantResourceLimit, - ) -} diff --git a/pkg/metrics/pool_recorder.go b/pkg/metrics/pool_recorder.go new file mode 100644 index 00000000..a4e60890 --- /dev/null +++ b/pkg/metrics/pool_recorder.go @@ -0,0 +1,168 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" +) + +type ResourcePoolRecorder struct { + poolResource *prometheus.GaugeVec + poolResourceLimit *prometheus.GaugeVec + poolResourceAvailable *prometheus.GaugeVec + poolResourceUsage *prometheus.GaugeVec + poolResourceExhaustion *prometheus.GaugeVec + poolNamespaceResourceUsage *prometheus.GaugeVec +} + +func MustMakeResourcePoolRecorder() *ResourcePoolRecorder { + metricsRecorder := NewResourcePoolRecorder() + crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) 
+ + return metricsRecorder +} + +func NewResourcePoolRecorder() *ResourcePoolRecorder { + return &ResourcePoolRecorder{ + poolResourceExhaustion: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "pool_exhaustion", + Help: "Resources become exhausted, when there's not enough available for all claims and the claims get queued", + }, + []string{"pool", "resource"}, + ), + poolResource: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "pool_resource", + Help: "Type of resource being used in a resource pool", + }, + []string{"pool", "resource"}, + ), + poolResourceLimit: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "pool_limit", + Help: "Current resource limit for a given resource in a resource pool", + }, + []string{"pool", "resource"}, + ), + poolResourceUsage: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "pool_usage", + Help: "Current resource usage for a given resource in a resource pool", + }, + []string{"pool", "resource"}, + ), + + poolResourceAvailable: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "pool_available", + Help: "Current resource availability for a given resource in a resource pool", + }, + []string{"pool", "resource"}, + ), + poolNamespaceResourceUsage: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "pool_namespace_usage", + Help: "Current resources claimed on namespace basis for a given resource in a resource pool for a specific namespace", + }, + []string{"pool", "target_namespace", "resource"}, + ), + } +} + +func (r *ResourcePoolRecorder) Collectors() []prometheus.Collector { + return []prometheus.Collector{ + r.poolResource, + r.poolResourceLimit, + r.poolResourceUsage, + r.poolResourceAvailable, + r.poolResourceExhaustion, + r.poolNamespaceResourceUsage, + } +} + +// Emit current hard limits and usage for a resource pool. +func (r *ResourcePoolRecorder) ResourceUsageMetrics(pool *capsulev1beta2.ResourcePool) { + for resourceName, quantity := range pool.Status.Allocation.Hard { + r.poolResourceLimit.WithLabelValues( + pool.Name, + resourceName.String(), + ).Set(float64(quantity.MilliValue()) / 1000) + + r.poolResource.WithLabelValues( + pool.Name, + resourceName.String(), + ).Set(float64(1)) + + claimed, exists := pool.Status.Allocation.Claimed[resourceName] + if !exists { + r.poolResourceUsage.DeletePartialMatch(map[string]string{ + "pool": pool.Name, + "resource": resourceName.String(), + }) + + continue + } + + r.poolResourceUsage.WithLabelValues( + pool.Name, + resourceName.String(), + ).Set(float64(claimed.MilliValue()) / 1000) + + available := pool.Status.Allocation.Available[resourceName] + r.poolResourceAvailable.WithLabelValues( + pool.Name, + resourceName.String(), + ).Set(float64(available.MilliValue()) / 1000) + } + + r.resourceUsageMetricsByNamespace(pool) +} + +// Delete all metrics for a namespace in a resource pool. +func (r *ResourcePoolRecorder) DeleteResourcePoolNamespaceMetric(pool string, namespace string) { + r.poolNamespaceResourceUsage.DeletePartialMatch(map[string]string{"pool": pool, "namespace": namespace}) +} + +// Delete all metrics for a resource pool. 
+func (r *ResourcePoolRecorder) DeleteResourcePoolMetric(pool string) { + r.cleanupAllMetricForLabels(map[string]string{"pool": pool}) +} + +func (r *ResourcePoolRecorder) DeleteResourcePoolSingleResourceMetric(pool string, resourceName string) { + r.cleanupAllMetricForLabels(map[string]string{"pool": pool, "resource": resourceName}) +} + +func (r *ResourcePoolRecorder) cleanupAllMetricForLabels(labels map[string]string) { + r.poolResourceLimit.DeletePartialMatch(labels) + r.poolResourceAvailable.DeletePartialMatch(labels) + r.poolResourceUsage.DeletePartialMatch(labels) + r.poolNamespaceResourceUsage.DeletePartialMatch(labels) + r.poolResource.DeletePartialMatch(labels) + r.poolResourceExhaustion.DeletePartialMatch(labels) +} + +// Calculate allocation per namespace for metric. +func (r *ResourcePoolRecorder) resourceUsageMetricsByNamespace(pool *capsulev1beta2.ResourcePool) { + resources := pool.GetClaimedByNamespaceClaims() + + for namespace, claims := range resources { + for resourceName, quantity := range claims { + r.poolNamespaceResourceUsage.WithLabelValues( + pool.Name, + namespace, + resourceName.String(), + ).Set(float64(quantity.MilliValue()) / 1000) + } + } +} diff --git a/pkg/metrics/tenant_recorder.go b/pkg/metrics/tenant_recorder.go new file mode 100644 index 00000000..dab824bb --- /dev/null +++ b/pkg/metrics/tenant_recorder.go @@ -0,0 +1,57 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +type TenantRecorder struct { + TenantResourceUsageGauge *prometheus.GaugeVec + TenantResourceLimitGauge *prometheus.GaugeVec +} + +func MustMakeTenantRecorder() *TenantRecorder { + metricsRecorder := NewTenantRecorder() + crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) + + return metricsRecorder +} + +func NewTenantRecorder() *TenantRecorder { + return &TenantRecorder{ + TenantResourceUsageGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "tenant_resource_usage", + Help: "Current resource usage for a given resource in a tenant", + }, []string{"tenant", "resource", "resourcequotaindex"}, + ), + TenantResourceLimitGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricsPrefix, + Name: "tenant_resource_limit", + Help: "Current resource limit for a given resource in a tenant", + }, []string{"tenant", "resource", "resourcequotaindex"}, + ), + } +} + +func (r *TenantRecorder) Collectors() []prometheus.Collector { + return []prometheus.Collector{ + r.TenantResourceUsageGauge, + r.TenantResourceLimitGauge, + } +} + +// DeleteCondition deletes the condition metrics for the ref. +func (r *TenantRecorder) DeleteTenantMetric(tenant string) { + r.TenantResourceUsageGauge.DeletePartialMatch(map[string]string{ + "tenant": tenant, + }) + r.TenantResourceLimitGauge.DeletePartialMatch(map[string]string{ + "tenant": tenant, + }) +} diff --git a/pkg/metrics/utils.go b/pkg/metrics/utils.go new file mode 100644 index 00000000..ebd5564d --- /dev/null +++ b/pkg/metrics/utils.go @@ -0,0 +1,8 @@ +// Copyright 2020-2023 Project Capsule Authors. +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +const ( + metricsPrefix = "capsule" +) diff --git a/pkg/utils/names.go b/pkg/utils/names.go new file mode 100644 index 00000000..ea4c072d --- /dev/null +++ b/pkg/utils/names.go @@ -0,0 +1,14 @@ +// Copyright 2020-2023 Project Capsule Authors. 
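// Illustrative sketch, not part of this changeset: how a reconciler would be
// expected to drive the ResourcePoolRecorder above. The controller wiring is
// not shown in this section, so the reconciler shape below is an assumption;
// only the recorder methods themselves come from pkg/metrics.
package example // hypothetical

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
	"github.com/projectcapsule/capsule/pkg/metrics"
)

type poolReconciler struct {
	client.Client
	metrics *metrics.ResourcePoolRecorder // built once via metrics.MustMakeResourcePoolRecorder()
}

func (r *poolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	pool := &capsulev1beta2.ResourcePool{}
	if err := r.Get(ctx, req.NamespacedName, pool); err != nil {
		if apierrors.IsNotFound(err) {
			// Drop every series labelled with this pool once it is gone.
			r.metrics.DeleteResourcePoolMetric(req.Name)

			return ctrl.Result{}, nil
		}

		return ctrl.Result{}, err
	}

	// ... reconcile claims and recompute pool status here ...

	// Re-emit hard/claimed/available gauges from the freshly computed status.
	r.metrics.ResourceUsageMetrics(pool)

	return ctrl.Result{}, nil
}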
+// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "fmt" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" +) + +func PoolResourceQuotaName(quota *capsulev1beta2.ResourcePool) string { + return fmt.Sprintf("capsule-pool-%s", quota.Name) +} diff --git a/pkg/utils/tenant_labels.go b/pkg/utils/tenant_labels.go index c1a10ada..cb43bcbb 100644 --- a/pkg/utils/tenant_labels.go +++ b/pkg/utils/tenant_labels.go @@ -23,6 +23,8 @@ func GetTypeLabel(t runtime.Object) (label string, err error) { switch v := t.(type) { case *v1beta1.Tenant, *v1beta2.Tenant: return "capsule.clastix.io/tenant", nil + case *v1beta2.ResourcePool: + return "projectcapsule.dev/pool", nil case *corev1.LimitRange: return "capsule.clastix.io/limit-range", nil case *networkingv1.NetworkPolicy: diff --git a/pkg/webhook/defaults/errors.go b/pkg/webhook/defaults/errors.go index 799d4309..1fabe180 100644 --- a/pkg/webhook/defaults/errors.go +++ b/pkg/webhook/defaults/errors.go @@ -6,6 +6,7 @@ package defaults import ( "fmt" "reflect" + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" ) @@ -63,7 +64,6 @@ type GatewayError struct { } func NewGatewayError(gateway gatewayv1.ObjectName, msg error) error { - return &GatewayError{ gateway: reflect.ValueOf(gateway).String(), msg: msg, diff --git a/pkg/webhook/defaults/gateway.go b/pkg/webhook/defaults/gateway.go index e312837c..eda8baf2 100644 --- a/pkg/webhook/defaults/gateway.go +++ b/pkg/webhook/defaults/gateway.go @@ -6,10 +6,11 @@ package defaults import ( "context" "encoding/json" + "net/http" + corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - "net/http" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" @@ -42,6 +43,7 @@ func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client. } var mutate bool + gatewayClass, err := utils.GetGatewayClassClassByObjectName(ctx, c, gatewayObj.Spec.GatewayClassName) if gatewayClass == nil { @@ -63,6 +65,7 @@ func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client. } else { mutate = true } + if mutate = mutate || (gatewayClass.Name == allowed.Default); !mutate { return nil } diff --git a/pkg/webhook/resourcepool/claim_mutating.go b/pkg/webhook/resourcepool/claim_mutating.go new file mode 100644 index 00000000..c4131bb3 --- /dev/null +++ b/pkg/webhook/resourcepool/claim_mutating.go @@ -0,0 +1,157 @@ +// Copyright 2020-2023 Project Capsule Authors. 
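// Illustrative sketch, not part of this changeset: how PoolResourceQuotaName
// and the new "projectcapsule.dev/pool" type label could be combined when
// materialising a pool into a per-namespace ResourceQuota. The controller
// that actually does this is not part of this section, so treat the function
// below as an assumption about intended use.
package example // hypothetical

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
	"github.com/projectcapsule/capsule/pkg/utils"
)

func desiredPoolQuota(pool *capsulev1beta2.ResourcePool, namespace string) (*corev1.ResourceQuota, error) {
	label, err := utils.GetTypeLabel(pool) // "projectcapsule.dev/pool"
	if err != nil {
		return nil, err
	}

	return &corev1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:      utils.PoolResourceQuotaName(pool), // "capsule-pool-<pool name>"
			Namespace: namespace,
			Labels:    map[string]string{label: pool.Name},
		},
		// Spec.Hard would be filled from the claims bound in this namespace.
	}, nil
}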
+// SPDX-License-Identifier: Apache-2.0 +package resourcepool + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + "github.com/projectcapsule/capsule/pkg/meta" + capsulewebhook "github.com/projectcapsule/capsule/pkg/webhook" + "github.com/projectcapsule/capsule/pkg/webhook/utils" +) + +type claimMutationHandler struct { + log logr.Logger +} + +func ClaimMutationHandler(log logr.Logger) capsulewebhook.Handler { + return &claimMutationHandler{log: log} +} + +func (h *claimMutationHandler) OnUpdate(c client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func { + return func(ctx context.Context, req admission.Request) *admission.Response { + return h.handle(ctx, req, decoder, c) + } +} + +func (h *claimMutationHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func { + return func(context.Context, admission.Request) *admission.Response { + return nil + } +} + +func (h *claimMutationHandler) OnCreate(c client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func { + return func(ctx context.Context, req admission.Request) *admission.Response { + return h.handle(ctx, req, decoder, c) + } +} + +func (h *claimMutationHandler) handle( + ctx context.Context, + req admission.Request, + decoder admission.Decoder, + c client.Client, +) *admission.Response { + claim := &capsulev1beta2.ResourcePoolClaim{} + + if err := decoder.Decode(req, claim); err != nil { + return utils.ErroredResponse(fmt.Errorf("failed to decode new object: %w", err)) + } + + if err := h.autoAssignPools(ctx, c, claim); err != nil { + response := admission.Errored(http.StatusInternalServerError, err) + + return &response + } + + h.handleReleaseAnnotation(claim) + + marshaled, err := json.Marshal(claim) + if err != nil { + response := admission.Errored(http.StatusInternalServerError, err) + + return &response + } + + response := admission.PatchResponseFromRaw(req.Object.Raw, marshaled) + + return &response +} + +// Only Adds release label when necessary. 
+func (h *claimMutationHandler) handleReleaseAnnotation( + claim *capsulev1beta2.ResourcePoolClaim, +) { + if !meta.ReleaseAnnotationTriggers(claim) { + return + } + + if !claim.IsBoundToResourcePool() { + return + } + + meta.ReleaseAnnotationRemove(claim) +} + +func (h *claimMutationHandler) autoAssignPools( + ctx context.Context, + c client.Client, + claim *capsulev1beta2.ResourcePoolClaim, +) error { + if claim.Spec.Pool != "" { + return nil + } + + poolList := &capsulev1beta2.ResourcePoolList{} + if err := c.List(ctx, poolList, client.MatchingFieldsSelector{ + Selector: fields.OneTermEqualSelector(".status.namespaces", claim.Namespace), + }); err != nil { + return err + } + + if len(poolList.Items) == 0 { + return nil + } + + candidates := make([]*capsulev1beta2.ResourcePool, 0) + + for _, pool := range poolList.Items { + assignable := true + allocatable := true + + for resource, requested := range claim.Spec.ResourceClaims { + if _, ok := pool.Status.Allocation.Hard[resource]; !ok { + assignable = false + + break + } + + available, ok := pool.Status.Allocation.Available[resource] + if !ok || available.Cmp(requested) < 0 { + allocatable = false + + break + } + } + + if !assignable { + continue + } + + if allocatable { + candidates = append([]*capsulev1beta2.ResourcePool{&pool}, candidates...) + + continue + } + + candidates = append(candidates, &pool) + } + + if len(candidates) == 0 { + return nil // no eligible pools + } + + claim.Spec.Pool = candidates[0].Name + + return nil +} diff --git a/pkg/webhook/resourcepool/claim_validating.go b/pkg/webhook/resourcepool/claim_validating.go new file mode 100644 index 00000000..0455a22b --- /dev/null +++ b/pkg/webhook/resourcepool/claim_validating.go @@ -0,0 +1,71 @@ +// Copyright 2020-2023 Project Capsule Authors. 
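// Illustrative sketch, not part of this changeset: behaviour of the
// projectcapsule.dev/release annotation helpers that the mutation handler
// above relies on. Everything here uses only pkg/meta functions introduced in
// this diff; the test name and package are hypothetical.
package example // hypothetical

import (
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/stretchr/testify/assert"

	"github.com/projectcapsule/capsule/pkg/meta"
)

func TestReleaseAnnotationHelpers(t *testing.T) {
	obj := &corev1.ConfigMap{}
	obj.SetAnnotations(map[string]string{meta.ReleaseAnnotation: "True"})

	// Matching is case-insensitive on the annotation value.
	assert.True(t, meta.ReleaseAnnotationTriggers(obj))

	// Removing the annotation makes subsequent checks return false.
	meta.ReleaseAnnotationRemove(obj)
	assert.False(t, meta.ReleaseAnnotationTriggers(obj))
}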
+// SPDX-License-Identifier: Apache-2.0 +package resourcepool + +import ( + "context" + "fmt" + "reflect" + + "github.com/go-logr/logr" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + capsulewebhook "github.com/projectcapsule/capsule/pkg/webhook" + "github.com/projectcapsule/capsule/pkg/webhook/utils" +) + +type claimValidationHandler struct { + log logr.Logger +} + +func ClaimValidationHandler(log logr.Logger) capsulewebhook.Handler { + return &claimValidationHandler{log: log} +} + +func (h *claimValidationHandler) OnCreate(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func { + return func(context.Context, admission.Request) *admission.Response { + return nil + } +} + +func (h *claimValidationHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func { + return func(context.Context, admission.Request) *admission.Response { + return nil + } +} + +func (h *claimValidationHandler) OnUpdate(_ client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func { + return func(_ context.Context, req admission.Request) *admission.Response { + oldClaim := &capsulev1beta2.ResourcePoolClaim{} + newClaim := &capsulev1beta2.ResourcePoolClaim{} + + if err := decoder.DecodeRaw(req.OldObject, oldClaim); err != nil { + return utils.ErroredResponse(fmt.Errorf("failed to decode old object: %w", err)) + } + + if err := decoder.Decode(req, newClaim); err != nil { + return utils.ErroredResponse(fmt.Errorf("failed to decode new object: %w", err)) + } + + if !reflect.DeepEqual(oldClaim.Spec.ResourceClaims, newClaim.Spec.ResourceClaims) { + if oldClaim.IsBoundToResourcePool() { + response := admission.Denied(fmt.Sprintf("cannot change the requested resources while claim is bound to a resourcepool %s", oldClaim.Status.Pool.Name)) + + return &response + } + } + + if !reflect.DeepEqual(oldClaim.Spec.Pool, newClaim.Spec.Pool) { + if oldClaim.IsBoundToResourcePool() { + response := admission.Denied(fmt.Sprintf("cannot change the pool while claim is bound to a resourcepool %s", oldClaim.Status.Pool.Name)) + + return &response + } + } + + return nil + } +} diff --git a/pkg/webhook/resourcepool/pool_mutating.go b/pkg/webhook/resourcepool/pool_mutating.go new file mode 100644 index 00000000..61db8762 --- /dev/null +++ b/pkg/webhook/resourcepool/pool_mutating.go @@ -0,0 +1,99 @@ +// Copyright 2020-2023 Project Capsule Authors. 
+// SPDX-License-Identifier: Apache-2.0 +package resourcepool + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + capsulewebhook "github.com/projectcapsule/capsule/pkg/webhook" + "github.com/projectcapsule/capsule/pkg/webhook/utils" +) + +type poolMutationHandler struct { + log logr.Logger +} + +func PoolMutationHandler(log logr.Logger) capsulewebhook.Handler { + return &poolMutationHandler{log: log} +} + +func (h *poolMutationHandler) OnCreate(_ client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func { + return func(_ context.Context, req admission.Request) *admission.Response { + return h.handle(req, decoder) + } +} + +func (h *poolMutationHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func { + return func(context.Context, admission.Request) *admission.Response { + return nil + } +} + +func (h *poolMutationHandler) OnUpdate(_ client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func { + return func(_ context.Context, req admission.Request) *admission.Response { + return h.handle(req, decoder) + } +} + +func (h *poolMutationHandler) handle( + req admission.Request, + decoder admission.Decoder, +) *admission.Response { + pool := &capsulev1beta2.ResourcePool{} + if err := decoder.Decode(req, pool); err != nil { + return utils.ErroredResponse(fmt.Errorf("failed to decode object: %w", err)) + } + + // Correctly set the defaults + h.handleDefaults(pool) + + // Marshal Manifest + marshaled, err := json.Marshal(pool) + if err != nil { + response := admission.Errored(http.StatusInternalServerError, err) + + return &response + } + + response := admission.PatchResponseFromRaw(req.Object.Raw, marshaled) + + return &response +} + +// Handles the Default Property. This is done at admission, to prevent and reconcile loops +// from gitops engines when ignores are not correctly set. +func (h *poolMutationHandler) handleDefaults( + pool *capsulev1beta2.ResourcePool, +) { + if !*pool.Spec.Config.DefaultsAssignZero { + return + } + + if pool.Spec.Defaults == nil { + pool.Spec.Defaults = corev1.ResourceList{} + } + + defaults := pool.Spec.Defaults + + for resourceName := range pool.Spec.Quota.Hard { + amount, exists := pool.Spec.Defaults[resourceName] + if !exists { + amount = resource.MustParse("0") + } + + defaults[resourceName] = amount + } + + pool.Spec.Defaults = defaults +} diff --git a/pkg/webhook/resourcepool/pool_validation.go b/pkg/webhook/resourcepool/pool_validation.go new file mode 100644 index 00000000..270da08f --- /dev/null +++ b/pkg/webhook/resourcepool/pool_validation.go @@ -0,0 +1,90 @@ +// Copyright 2020-2023 Project Capsule Authors. 
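// Illustrative sketch, not part of this changeset: the effect of
// handleDefaults above, expressed on plain ResourceLists. With
// spec.config.defaultsAssignZero enabled, every resource named in the hard
// quota gets an explicit default; existing defaults are preserved and missing
// ones are pinned to zero. The helper name is hypothetical.
package example // hypothetical

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func zeroFillDefaults(hard, defaults corev1.ResourceList) corev1.ResourceList {
	if defaults == nil {
		defaults = corev1.ResourceList{}
	}

	for name := range hard {
		if _, ok := defaults[name]; !ok {
			defaults[name] = resource.MustParse("0")
		}
	}

	return defaults
}

// Example: hard = {cpu: "4", memory: "8Gi"}, defaults = {cpu: "500m"}
// zeroFillDefaults(hard, defaults) = {cpu: "500m", memory: "0"}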
+// SPDX-License-Identifier: Apache-2.0 +package resourcepool + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" + capsulewebhook "github.com/projectcapsule/capsule/pkg/webhook" + "github.com/projectcapsule/capsule/pkg/webhook/utils" +) + +type poolValidationHandler struct { + log logr.Logger +} + +func PoolValidationHandler(log logr.Logger) capsulewebhook.Handler { + return &poolValidationHandler{log: log} +} + +func (h *poolValidationHandler) OnCreate(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func { + return func(context.Context, admission.Request) *admission.Response { + return nil + } +} + +func (h *poolValidationHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func { + return func(context.Context, admission.Request) *admission.Response { + return nil + } +} + +func (h *poolValidationHandler) OnUpdate(_ client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func { + return func(_ context.Context, req admission.Request) *admission.Response { + oldPool := &capsulev1beta2.ResourcePool{} + if err := decoder.DecodeRaw(req.OldObject, oldPool); err != nil { + return utils.ErroredResponse(err) + } + + pool := &capsulev1beta2.ResourcePool{} + if err := decoder.Decode(req, pool); err != nil { + return utils.ErroredResponse(err) + } + + // Verify if resource decrease is allowed or no + if !equality.Semantic.DeepEqual(pool.Spec.Quota.Hard, oldPool.Spec.Quota.Hard) { + zeroValue := resource.MustParse("0") + + for resourceName, qt := range oldPool.Status.Allocation.Claimed { + allocation, exists := pool.Spec.Quota.Hard[resourceName] + + if !exists { + // May remove resources when unused + if zeroValue.Cmp(qt) == 0 { + continue + } + + response := admission.Denied(fmt.Sprintf( + "can not remove resource %s as it is still being allocated. Remove corresponding claims or keep the resources in the pool", + resourceName, + )) + + return &response + } + + if allocation.Cmp(qt) < 0 { + response := admission.Denied( + fmt.Sprintf( + "can not reduce %s usage to %s because quantity %s is claimed . Remove corresponding claims or keep the resources in the pool", + resourceName, + allocation.String(), + qt.String(), + )) + + return &response + } + } + } + + return nil + } +} diff --git a/pkg/webhook/route/resourcepool.go b/pkg/webhook/route/resourcepool.go new file mode 100644 index 00000000..6a668712 --- /dev/null +++ b/pkg/webhook/route/resourcepool.go @@ -0,0 +1,72 @@ +// Copyright 2020-2023 Project Capsule Authors. 
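// Illustrative sketch, not part of this changeset: the per-resource comparison
// the pool validation above performs. A new hard limit is rejected as soon as
// it drops below what bound claims have already taken from the pool. The
// helper name is hypothetical.
package example // hypothetical

import "k8s.io/apimachinery/pkg/api/resource"

func reductionAllowed(newHard, alreadyClaimed resource.Quantity) bool {
	// Mirrors allocation.Cmp(qt) < 0 in the handler: when the new limit is
	// smaller than the claimed amount, the update is denied.
	return newHard.Cmp(alreadyClaimed) >= 0
}

// Example: with "3" cpu already claimed, lowering spec.quota.hard[cpu] to "2"
// is denied (2 < 3); lowering it to "3" is still allowed because nothing
// would be over-committed.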
+// SPDX-License-Identifier: Apache-2.0 + +package route + +import ( + capsulewebhook "github.com/projectcapsule/capsule/pkg/webhook" +) + +type poolmutation struct { + handlers []capsulewebhook.Handler +} + +func ResourcePoolMutation(handler ...capsulewebhook.Handler) capsulewebhook.Webhook { + return &poolmutation{handlers: handler} +} + +func (w *poolmutation) GetHandlers() []capsulewebhook.Handler { + return w.handlers +} + +func (w *poolmutation) GetPath() string { + return "/resourcepool/mutating" +} + +type poolclaimmutation struct { + handlers []capsulewebhook.Handler +} + +func ResourcePoolClaimMutation(handler ...capsulewebhook.Handler) capsulewebhook.Webhook { + return &poolclaimmutation{handlers: handler} +} + +func (w *poolclaimmutation) GetHandlers() []capsulewebhook.Handler { + return w.handlers +} + +func (w *poolclaimmutation) GetPath() string { + return "/resourcepool/claim/mutating" +} + +type poolValidation struct { + handlers []capsulewebhook.Handler +} + +func ResourcePoolValidation(handler ...capsulewebhook.Handler) capsulewebhook.Webhook { + return &poolValidation{handlers: handler} +} + +func (w *poolValidation) GetHandlers() []capsulewebhook.Handler { + return w.handlers +} + +func (w *poolValidation) GetPath() string { + return "/resourcepool/validating" +} + +type poolclaimValidation struct { + handlers []capsulewebhook.Handler +} + +func ResourcePoolClaimValidation(handler ...capsulewebhook.Handler) capsulewebhook.Webhook { + return &poolclaimValidation{handlers: handler} +} + +func (w *poolclaimValidation) GetHandlers() []capsulewebhook.Handler { + return w.handlers +} + +func (w *poolclaimValidation) GetPath() string { + return "/resourcepool/claim/validating" +}
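// Illustrative sketch, not part of this changeset: the contract the four
// route types above satisfy. The real capsulewebhook.Webhook interface lives
// in pkg/webhook and is not shown in this section, so the definition below is
// inferred from these implementations.
package example // hypothetical

import capsulewebhook "github.com/projectcapsule/capsule/pkg/webhook"

// A route only couples an HTTP path on the webhook server with the ordered
// handlers that run for admission requests hitting that path.
type webhookRoute interface {
	GetHandlers() []capsulewebhook.Handler
	GetPath() string
}

// Building, say, the claim endpoints would then look roughly like:
//
//	route.ResourcePoolClaimMutation(resourcepool.ClaimMutationHandler(log))
//	route.ResourcePoolClaimValidation(resourcepool.ClaimValidationHandler(log))
//
// with the returned webhooks handed to whatever registration helper the
// manager setup uses (not shown here).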