feat(api): add resourcepools and claims (#1333)

* feat: functional appsets

* feat(api): add resourcepools api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: fix gomod

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: correct webhooks

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: fix harpoon image

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: improve e2e

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: add labels to e2e test

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: fix status handling

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: fix racing conditions

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: make values compatible

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: fix custom resources test

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: correct metrics

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

---------

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>
Oliver Bähler, committed by GitHub on 2025-05-22 09:07:13 +02:00
parent f143abc481 · commit c7237f802b
115 changed files with 7221 additions and 116 deletions

.gitignore

@@ -7,6 +7,7 @@
*.dylib
bin
dist/
config/
# Test binary, build with `go test -c`
*.test


@@ -56,6 +56,10 @@ linters:
      - third_party$
      - builtin$
      - examples$
    rules:
      - path: pkg/meta/
        linters:
          - dupl
formatters:
  enable:
    - gci


@@ -4,6 +4,6 @@ defaultPlatforms:
  - linux/arm
builds:
  - id: capsule
-   main: ./
+   main: ./cmd/
    ldflags:
      - '{{ if index .Env "LD_FLAGS" }}{{ .Env.LD_FLAGS }}{{ end }}'


@@ -39,3 +39,8 @@ repos:
    entry: make golint
    language: system
    files: \.go$
  - id: go-test
    name: Execute go test
    entry: make test
    language: system
    files: \.go$


@@ -5,7 +5,7 @@ FROM ${TARGET_IMAGE} AS target
# Inject Harpoon Image
FROM ghcr.io/alegrey91/harpoon:latest
WORKDIR /
-COPY --from=target /ko-app/capsule ./manager
+COPY --from=target /ko-app/cmd ./manager
RUN chmod +x ./harpoon
ENTRYPOINT ["/harpoon", \
    "capture", \


@@ -178,7 +178,7 @@ LD_FLAGS := "-X main.Version=$(VERSION) \
ko-build-capsule: ko
	@echo Building Capsule $(KO_TAGS) for $(KO_PLATFORM) >&2
	@LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(CAPSULE_IMG) \
-		$(KO) build ./ --bare --tags=$(KO_TAGS) --push=false --local --platform=$(KO_PLATFORM)
+		$(KO) build ./cmd/ --bare --tags=$(KO_TAGS) --push=false --local --platform=$(KO_PLATFORM)
.PHONY: ko-build-all
ko-build-all: ko-build-capsule
@@ -204,7 +204,7 @@ ko-login: ko
.PHONY: ko-publish-capsule
ko-publish-capsule: ko-login ## Build and publish kyvernopre image (with ko)
	@LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(CAPSULE_IMG) \
-		$(KO) build ./ --bare --tags=$(KO_TAGS)
+		$(KO) build ./cmd/ --bare --tags=$(KO_TAGS)
.PHONY: ko-publish-all
ko-publish-all: ko-publish-capsule

PROJECT

@@ -1,6 +1,10 @@
# Code generated by tool. DO NOT EDIT.
# This file is used to track the info used to scaffold your project
# and allow the plugins properly work.
# More info: https://book.kubebuilder.io/reference/project-config.html
domain: clastix.io
layout:
-- go.kubebuilder.io/v3
+- go.kubebuilder.io/v4
plugins:
  manifests.sdk.operatorframework.io/v2: {}
  scorecard.sdk.operatorframework.io/v2: {}
@@ -44,4 +48,20 @@ resources:
  kind: GlobalTenantResource
  path: github.com/projectcapsule/capsule/api/v1beta2
  version: v1beta2
- api:
    crdVersion: v1
  domain: clastix.io
  group: capsule
  kind: ResourcePool
  path: github.com/projectcapsule/capsule/api/v1beta2
  version: v1beta2
- api:
    crdVersion: v1
    namespaced: true
  controller: true
  domain: clastix.io
  group: capsule
  kind: ResourcePoolClaim
  path: github.com/projectcapsule/capsule/api/v1beta2
  version: v1beta2
version: "3"


@@ -0,0 +1,276 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"errors"
"sort"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/projectcapsule/capsule/pkg/api"
)
func (r *ResourcePool) AssignNamespaces(namespaces []corev1.Namespace) {
var l []string
for _, ns := range namespaces {
if ns.Status.Phase == corev1.NamespaceActive && ns.DeletionTimestamp == nil {
l = append(l, ns.GetName())
}
}
sort.Strings(l)
r.Status.NamespaceSize = uint(len(l))
r.Status.Namespaces = l
}
func (r *ResourcePool) AssignClaims() {
var size uint
for _, claims := range r.Status.Claims {
for range claims {
size++
}
}
r.Status.ClaimSize = size
}
func (r *ResourcePool) GetClaimFromStatus(cl *ResourcePoolClaim) *ResourcePoolClaimsItem {
ns := cl.Namespace
claims := r.Status.Claims[ns]
if claims == nil {
return nil
}
for _, claim := range claims {
if claim.UID == cl.UID {
return claim
}
}
return nil
}
func (r *ResourcePool) AddClaimToStatus(claim *ResourcePoolClaim) {
ns := claim.Namespace
if r.Status.Claims == nil {
r.Status.Claims = ResourcePoolNamespaceClaimsStatus{}
}
if r.Status.Allocation.Claimed == nil {
r.Status.Allocation.Claimed = corev1.ResourceList{}
}
claims := r.Status.Claims[ns]
if claims == nil {
claims = ResourcePoolClaimsList{}
}
scl := &ResourcePoolClaimsItem{
StatusNameUID: api.StatusNameUID{
UID: claim.UID,
Name: api.Name(claim.Name),
},
Claims: claim.Spec.ResourceClaims,
}
// Try to update existing entry if UID matches
exists := false
for i, cl := range claims {
if cl.UID == claim.UID {
claims[i] = scl
exists = true
break
}
}
if !exists {
claims = append(claims, scl)
}
r.Status.Claims[ns] = claims
r.CalculateClaimedResources()
}
func (r *ResourcePool) RemoveClaimFromStatus(claim *ResourcePoolClaim) {
newClaims := ResourcePoolClaimsList{}
claims, ok := r.Status.Claims[claim.Namespace]
if !ok {
return
}
for _, cl := range claims {
if cl.UID != claim.UID {
newClaims = append(newClaims, cl)
}
}
r.Status.Claims[claim.Namespace] = newClaims
if len(newClaims) == 0 {
delete(r.Status.Claims, claim.Namespace)
}
}
func (r *ResourcePool) CalculateClaimedResources() {
usage := corev1.ResourceList{}
for res := range r.Status.Allocation.Hard {
usage[res] = resource.MustParse("0")
}
for _, claims := range r.Status.Claims {
for _, claim := range claims {
for resourceName, qt := range claim.Claims {
amount, exists := usage[resourceName]
if !exists {
amount = resource.MustParse("0")
}
amount.Add(qt)
usage[resourceName] = amount
}
}
}
r.Status.Allocation.Claimed = usage
r.CalculateAvailableResources()
}
func (r *ResourcePool) CalculateAvailableResources() {
available := corev1.ResourceList{}
for res, qt := range r.Status.Allocation.Hard {
amount, exists := r.Status.Allocation.Claimed[res]
if exists {
qt.Sub(amount)
}
available[res] = qt
}
r.Status.Allocation.Available = available
}
func (r *ResourcePool) CanClaimFromPool(claim corev1.ResourceList) []error {
claimable := r.GetAvailableClaimableResources()
errs := []error{}
for resourceName, req := range claim {
available, exists := claimable[resourceName]
if !exists || available.IsZero() || available.Cmp(req) < 0 {
errs = append(errs, errors.New("not enough resources "+string(resourceName)+" available"))
}
}
return errs
}
func (r *ResourcePool) GetAvailableClaimableResources() corev1.ResourceList {
hard := r.Status.Allocation.Hard.DeepCopy()
for resourceName, qt := range hard {
claimed, exists := r.Status.Allocation.Claimed[resourceName]
if !exists {
claimed = resource.MustParse("0")
}
qt.Sub(claimed)
hard[resourceName] = qt
}
return hard
}
// GetResourceQuotaHardResources returns the hard specification for the ResourceQuota of a namespace.
// It takes into account the defaults being used; however, defaults do not count towards the claim usage.
// This may change in the future: defaults are not calculated as usage because doing so might interrupt
// namespace management, as we would need to verify that a new namespace with its defaults still has room
// in the pool. The same applies when attempting to join existing namespaces.
func (r *ResourcePool) GetResourceQuotaHardResources(namespace string) corev1.ResourceList {
_, claimed := r.GetNamespaceClaims(namespace)
for resourceName, amount := range claimed {
if amount.IsZero() {
delete(claimed, resourceName)
}
}
// Add the configured defaults on top; they are not counted as claim usage
for resourceName, amount := range r.Spec.Defaults {
usedValue := claimed[resourceName]
usedValue.Add(amount)
claimed[resourceName] = usedValue
}
return claimed
}
// Gets the total amount of claimed resources for a namespace.
func (r *ResourcePool) GetNamespaceClaims(namespace string) (claims map[string]*ResourcePoolClaimsItem, claimedResources corev1.ResourceList) {
claimedResources = corev1.ResourceList{}
claims = map[string]*ResourcePoolClaimsItem{}
// Collect the claims and their claimed resources for the requested namespace
for ns, cl := range r.Status.Claims {
if ns != namespace {
continue
}
for _, claim := range cl {
for resourceName, claimed := range claim.Claims {
usedValue, usedExists := claimedResources[resourceName]
if !usedExists {
usedValue = resource.MustParse("0") // Default to zero if no used value is found
}
// Combine with claim
usedValue.Add(claimed)
claimedResources[resourceName] = usedValue
}
claims[string(claim.UID)] = claim
}
}
return
}
// Calculate usage for each namespace.
func (r *ResourcePool) GetClaimedByNamespaceClaims() (claims map[string]corev1.ResourceList) {
claims = map[string]corev1.ResourceList{}
// Aggregate the claimed resources per namespace
for ns, cl := range r.Status.Claims {
claims[ns] = corev1.ResourceList{}
nsScope := claims[ns]
for _, claim := range cl {
for resourceName, claimed := range claim.Claims {
usedValue, usedExists := nsScope[resourceName]
if !usedExists {
usedValue = resource.MustParse("0")
}
usedValue.Add(claimed)
nsScope[resourceName] = usedValue
}
}
}
return
}
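To make the defaults accounting above concrete, here is a small worked sketch (quantities and names are hypothetical, not taken from this PR): with pool defaults of limits.cpu: 100m and a single bound claim of limits.cpu: 1 in a namespace, GetResourceQuotaHardResources yields 1100m, since defaults stack on top of claimed resources but are not counted as claim usage against the pool. Rendered as the ResourceQuota hard spec the pool would provision:

apiVersion: v1
kind: ResourceQuota
metadata:
  name: capsule-pool-example   # hypothetical name
  namespace: solar-web         # hypothetical namespace
spec:
  hard:
    limits.cpu: "1100m"        # claimed (1) + default (100m)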


@@ -0,0 +1,292 @@
package v1beta2
import (
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/meta"
"github.com/stretchr/testify/assert"
)
func TestGetClaimFromStatus(t *testing.T) {
ns := "test-namespace"
testUID := types.UID("test-uid")
otherUID := types.UID("wrong-uid")
claim := &ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claim-a",
Namespace: ns,
UID: testUID,
},
}
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
ns: {
&ResourcePoolClaimsItem{
StatusNameUID: api.StatusNameUID{
UID: testUID,
},
Claims: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("500m"),
corev1.ResourceMemory: resource.MustParse("256Mi"),
},
},
},
},
},
}
t.Run("returns matching claim", func(t *testing.T) {
found := pool.GetClaimFromStatus(claim)
assert.NotNil(t, found)
assert.Equal(t, testUID, found.UID)
})
t.Run("returns nil if UID doesn't match", func(t *testing.T) {
claimWrongUID := *claim
claimWrongUID.UID = otherUID
found := pool.GetClaimFromStatus(&claimWrongUID)
assert.Nil(t, found)
})
t.Run("returns nil if namespace has no claims", func(t *testing.T) {
claimWrongNS := *claim
claimWrongNS.Namespace = "other-ns"
found := pool.GetClaimFromStatus(&claimWrongNS)
assert.Nil(t, found)
})
}
func makeResourceList(cpu, memory string) corev1.ResourceList {
return corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse(cpu),
corev1.ResourceLimitsMemory: resource.MustParse(memory),
}
}
func makeClaim(name, ns string, uid types.UID, res corev1.ResourceList) *ResourcePoolClaim {
return &ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
UID: uid,
},
Spec: ResourcePoolClaimSpec{
ResourceClaims: res,
},
}
}
func TestAssignNamespaces(t *testing.T) {
pool := &ResourcePool{}
namespaces := []corev1.Namespace{
{ObjectMeta: metav1.ObjectMeta{Name: "active-ns"}, Status: corev1.NamespaceStatus{Phase: corev1.NamespaceActive}},
{ObjectMeta: metav1.ObjectMeta{Name: "terminating-ns", DeletionTimestamp: &metav1.Time{}}, Status: corev1.NamespaceStatus{Phase: corev1.NamespaceTerminating}},
}
pool.AssignNamespaces(namespaces)
assert.Equal(t, uint(1), pool.Status.NamespaceSize)
assert.Equal(t, []string{"active-ns"}, pool.Status.Namespaces)
}
func TestAssignClaims(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{},
&ResourcePoolClaimsItem{},
},
},
},
}
pool.AssignClaims()
assert.Equal(t, uint(2), pool.Status.ClaimSize)
}
func TestAddRemoveClaimToStatus(t *testing.T) {
pool := &ResourcePool{}
claim := makeClaim("claim-1", "ns", "uid-1", makeResourceList("1", "1Gi"))
pool.AddClaimToStatus(claim)
stored := pool.GetClaimFromStatus(claim)
assert.NotNil(t, stored)
assert.Equal(t, api.Name("claim-1"), stored.Name)
pool.RemoveClaimFromStatus(claim)
assert.Nil(t, pool.GetClaimFromStatus(claim))
}
func TestCalculateResources(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Allocation: ResourcePoolQuotaStatus{
Hard: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
},
},
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{
Claims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
},
},
},
}
pool.CalculateClaimedResources()
actualClaimed := pool.Status.Allocation.Claimed[corev1.ResourceLimitsCPU]
actualAvailable := pool.Status.Allocation.Available[corev1.ResourceLimitsCPU]
assert.Equal(t, 0, (&actualClaimed).Cmp(resource.MustParse("1")))
assert.Equal(t, 0, (&actualAvailable).Cmp(resource.MustParse("1")))
}
func TestCanClaimFromPool(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Allocation: ResourcePoolQuotaStatus{
Hard: corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("1Gi"),
},
Claimed: corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("512Mi"),
},
},
},
}
errs := pool.CanClaimFromPool(corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("1Gi"),
})
assert.Len(t, errs, 1)
errs = pool.CanClaimFromPool(corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("500Mi"),
})
assert.Len(t, errs, 0)
}
func TestGetResourceQuotaHardResources(t *testing.T) {
pool := &ResourcePool{
Spec: ResourcePoolSpec{
Defaults: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{
Claims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
},
},
},
}
res := pool.GetResourceQuotaHardResources("ns")
actual := res[corev1.ResourceLimitsCPU]
assert.Equal(t, 0, (&actual).Cmp(resource.MustParse("2")))
}
func TestGetNamespaceClaims(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{
StatusNameUID: api.StatusNameUID{UID: "uid1"},
Claims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
},
},
},
}
claims, res := pool.GetNamespaceClaims("ns")
assert.Contains(t, claims, "uid1")
actual := res[corev1.ResourceLimitsCPU]
assert.Equal(t, 0, (&actual).Cmp(resource.MustParse("1")))
}
func TestGetClaimedByNamespaceClaims(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns1": {
&ResourcePoolClaimsItem{
Claims: makeResourceList("1", "1Gi"),
},
},
},
},
}
result := pool.GetClaimedByNamespaceClaims()
actualCPU := result["ns1"][corev1.ResourceLimitsCPU]
actualMem := result["ns1"][corev1.ResourceLimitsMemory]
assert.Equal(t, 0, (&actualCPU).Cmp(resource.MustParse("1")))
assert.Equal(t, 0, (&actualMem).Cmp(resource.MustParse("1Gi")))
}
func TestIsBoundToResourcePool_2(t *testing.T) {
t.Run("bound to resource pool (Assigned=True)", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Condition: metav1.Condition{
Type: meta.BoundCondition,
Status: metav1.ConditionTrue,
},
},
}
assert.Equal(t, true, claim.IsBoundToResourcePool())
})
t.Run("not bound - wrong condition type", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Condition: metav1.Condition{
Type: "Other",
Status: metav1.ConditionTrue,
},
},
}
assert.Equal(t, false, claim.IsBoundToResourcePool())
})
t.Run("not bound - condition not true", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Condition: metav1.Condition{
Type: meta.BoundCondition,
Status: metav1.ConditionFalse,
},
},
}
assert.Equal(t, false, claim.IsBoundToResourcePool())
})
}


@@ -0,0 +1,62 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/projectcapsule/capsule/pkg/api"
)
// ResourcePoolStatus defines the observed state of ResourcePool.
type ResourcePoolStatus struct {
// How many namespaces are considered
// +kubebuilder:default=0
NamespaceSize uint `json:"namespaceCount,omitempty"`
// Amount of claims
// +kubebuilder:default=0
ClaimSize uint `json:"claimCount,omitempty"`
// Namespaces which are considered for claims
Namespaces []string `json:"namespaces,omitempty"`
// Tracks the claims per namespace.
Claims ResourcePoolNamespaceClaimsStatus `json:"claims,omitempty"`
// Tracks the usage of claimed resources against what has been granted by the pool
Allocation ResourcePoolQuotaStatus `json:"allocation,omitempty"`
}
type ResourcePoolNamespaceClaimsStatus map[string]ResourcePoolClaimsList
type ResourcePoolQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard corev1.ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// Used is the current observed total usage of the resource in the namespace.
// +optional
Claimed corev1.ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
// Used to track the usage of the resource in the pool (diff hard - claimed). May be used for further automation
// +optional
Available corev1.ResourceList `json:"available,omitempty" protobuf:"bytes,3,rep,name=available,casttype=ResourceList,castkey=ResourceName"`
}
type ResourcePoolClaimsList []*ResourcePoolClaimsItem
func (r *ResourcePoolClaimsList) GetClaimByUID(uid types.UID) *ResourcePoolClaimsItem {
for _, claim := range *r {
if claim.UID == uid {
return claim
}
}
return nil
}
// ResourcePoolClaimsItem defines a claim entry tracked in the pool status.
type ResourcePoolClaimsItem struct {
// Reference to the ResourcePoolClaim this entry tracks
api.StatusNameUID `json:",inline"`
// Claimed resources
Claims corev1.ResourceList `json:"claims,omitempty"`
}
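Since the Claimed field serializes under the JSON key used, a pool status roughly takes the following shape (a sketch with hypothetical quantities); available is always hard minus claimed, as computed by CalculateClaimedResources and CalculateAvailableResources:

status:
  allocation:
    hard:
      limits.cpu: "4"
      limits.memory: 8Gi
    used:                      # the Claimed field, serialized as "used"
      limits.cpu: 1500m
      limits.memory: 3Gi
    available:                 # hard - claimed
      limits.cpu: 2500m
      limits.memory: 5Gi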


@@ -0,0 +1,76 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
// ResourcePoolSpec.
type ResourcePoolSpec struct {
// Selector to match the namespaces that should be managed by the ResourcePool
Selectors []api.NamespaceSelector `json:"selectors,omitempty"`
// Define the resourcequota served by this resourcepool.
Quota corev1.ResourceQuotaSpec `json:"quota"`
// The defaults given for each namespace; defaults are not counted towards the total allocation.
// When you use claims it's recommended to provision defaults, as they prevent the scheduling of any resources until quota is claimed
Defaults corev1.ResourceList `json:"defaults,omitempty"`
// Additional Configuration
//+kubebuilder:default:={}
Config ResourcePoolSpecConfiguration `json:"config,omitempty"`
}
type ResourcePoolSpecConfiguration struct {
// With this option all resources which can be allocated are set to 0 for the resourcequota defaults.
// +kubebuilder:default=false
DefaultsAssignZero *bool `json:"defaultsZero,omitempty"`
// Claims are queued whenever they are allocated to a pool. A pool tries to allocate claims in order,
// based on their creation date. By default, however, if a claim requests too many resources it is put
// into the queue, while a later claim that still fits into the available resources may claim them,
// even though its priority is lower.
// Enabling this option enforces the order: the creation timestamp matters, and once a claim is queued,
// no lower-priority claim can take the same resources.
// +kubebuilder:default=false
OrderedQueue *bool `json:"orderedQueue,omitempty"`
// When a resourcepool is deleted, the resourceclaims bound to it are disassociated from the resourcepool but not deleted.
// By Enabling this option, the resourceclaims will be deleted when the resourcepool is deleted, if they are in bound state.
// +kubebuilder:default=false
DeleteBoundResources *bool `json:"deleteBoundResources,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster,shortName=quotapool
// +kubebuilder:printcolumn:name="Claims",type="integer",JSONPath=".status.claimCount",description="The total amount of Claims bound"
// +kubebuilder:printcolumn:name="Namespaces",type="integer",JSONPath=".status.namespaceCount",description="The total amount of Namespaces considered"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
// A ResourcePool allows you to define a set of resources as known from ResourceQuotas. ResourcePools are
// defined at cluster scope and should be administrated by cluster administrators. However, they provide an
// interface through which cluster administrators can define from which namespaces the resources of a
// ResourcePool may be claimed. Claiming is done via a namespaced CRD called ResourcePoolClaim; it is then up
// to the group of users within these namespaces to manage the resources they consume per namespace. Each
// ResourcePool provisions a ResourceQuota into all selected namespaces. ResourcePoolClaims, once assigned to
// the ResourcePool, stack additional resources on top of that ResourceQuota, based on the namespace the
// ResourcePoolClaim was made from.
type ResourcePool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ResourcePoolSpec `json:"spec,omitempty"`
Status ResourcePoolStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// ResourcePoolList contains a list of ResourcePool.
type ResourcePoolList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ResourcePool `json:"items"`
}
func init() {
SchemeBuilder.Register(&ResourcePool{}, &ResourcePoolList{})
}
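A minimal example manifest for this type (pool name, tenant label value, and quantities are illustrative assumptions, not part of this PR):

apiVersion: capsule.clastix.io/v1beta2
kind: ResourcePool
metadata:
  name: solar-pool             # hypothetical name; cluster-scoped, so no namespace
spec:
  selectors:
    - matchLabels:
        capsule.clastix.io/tenant: solar
  quota:
    hard:
      limits.cpu: "4"
      limits.memory: 8Gi
  defaults:
    limits.cpu: 100m
    limits.memory: 128Mi
  config:
    defaultsZero: false
    orderedQueue: false
    deleteBoundResources: false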


@@ -0,0 +1,20 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/meta"
)
// IsBoundToResourcePool indicates whether the claim is bound to a ResourcePool.
func (r *ResourcePoolClaim) IsBoundToResourcePool() bool {
if r.Status.Condition.Type == meta.BoundCondition &&
r.Status.Condition.Status == metav1.ConditionTrue {
return true
}
return false
}


@@ -0,0 +1,71 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"testing"
"github.com/projectcapsule/capsule/pkg/meta"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestIsBoundToResourcePool(t *testing.T) {
tests := []struct {
name string
claim ResourcePoolClaim
expected bool
}{
{
name: "bound to resource pool (Assigned=True)",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Condition: metav1.Condition{
Type: meta.BoundCondition,
Status: metav1.ConditionTrue,
},
},
},
expected: true,
},
{
name: "not bound - wrong condition type",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Condition: metav1.Condition{
Type: "SomethingElse",
Status: metav1.ConditionTrue,
},
},
},
expected: false,
},
{
name: "not bound - status not true",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Condition: metav1.Condition{
Type: meta.BoundCondition,
Status: metav1.ConditionFalse,
},
},
},
expected: false,
},
{
name: "not bound - empty condition",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{},
},
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := tt.claim.IsBoundToResourcePool()
assert.Equal(t, tt.expected, actual)
})
}
}


@@ -0,0 +1,58 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
type ResourcePoolClaimSpec struct {
// If there's the possibility to claim from multiple ResourcePools,
// you must be specific about which one you want to claim resources from.
// Once bound to a ResourcePool, this field is immutable
Pool string `json:"pool"`
// Amount which should be claimed for the resourcequota
ResourceClaims corev1.ResourceList `json:"claim"`
}
// ResourcePoolClaimStatus defines the observed state of ResourcePoolClaim.
type ResourcePoolClaimStatus struct {
// Reference to the ResourcePool being claimed from
Pool api.StatusNameUID `json:"pool,omitempty"`
// Condition for this resource claim
Condition metav1.Condition `json:"condition,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Pool",type="string",JSONPath=".status.pool.name",description="The ResourcePool being claimed from"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.condition.type",description="Status for claim"
// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.condition.reason",description="Reason for status"
// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.condition.message",description="Condition Message"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// ResourcePoolClaim is the Schema for the resourcepoolclaims API.
type ResourcePoolClaim struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ResourcePoolClaimSpec `json:"spec,omitempty"`
Status ResourcePoolClaimStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// ResourcePoolClaimList contains a list of ResourcePoolClaim.
type ResourcePoolClaimList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ResourcePoolClaim `json:"items"`
}
func init() {
SchemeBuilder.Register(&ResourcePoolClaim{}, &ResourcePoolClaimList{})
}
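A matching claim, created inside one of the namespaces selected by the pool (names are again illustrative); once bound, the claimed resources are stacked on top of that namespace's ResourceQuota:

apiVersion: capsule.clastix.io/v1beta2
kind: ResourcePoolClaim
metadata:
  name: web-claim              # hypothetical name
  namespace: solar-web         # hypothetical namespace
spec:
  pool: solar-pool             # the ResourcePool to claim from
  claim:
    limits.cpu: 500m
    limits.memory: 512Mi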


@@ -9,6 +9,7 @@ package v1beta2
import (
"github.com/projectcapsule/capsule/pkg/api"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -517,6 +518,387 @@ func (in *RawExtension) DeepCopy() *RawExtension {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePool) DeepCopyInto(out *ResourcePool) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool.
func (in *ResourcePool) DeepCopy() *ResourcePool {
if in == nil {
return nil
}
out := new(ResourcePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourcePool) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolClaim) DeepCopyInto(out *ResourcePoolClaim) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaim.
func (in *ResourcePoolClaim) DeepCopy() *ResourcePoolClaim {
if in == nil {
return nil
}
out := new(ResourcePoolClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourcePoolClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolClaimList) DeepCopyInto(out *ResourcePoolClaimList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourcePoolClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimList.
func (in *ResourcePoolClaimList) DeepCopy() *ResourcePoolClaimList {
if in == nil {
return nil
}
out := new(ResourcePoolClaimList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourcePoolClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolClaimSpec) DeepCopyInto(out *ResourcePoolClaimSpec) {
*out = *in
if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimSpec.
func (in *ResourcePoolClaimSpec) DeepCopy() *ResourcePoolClaimSpec {
if in == nil {
return nil
}
out := new(ResourcePoolClaimSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolClaimStatus) DeepCopyInto(out *ResourcePoolClaimStatus) {
*out = *in
out.Pool = in.Pool
in.Condition.DeepCopyInto(&out.Condition)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimStatus.
func (in *ResourcePoolClaimStatus) DeepCopy() *ResourcePoolClaimStatus {
if in == nil {
return nil
}
out := new(ResourcePoolClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolClaimsItem) DeepCopyInto(out *ResourcePoolClaimsItem) {
*out = *in
out.StatusNameUID = in.StatusNameUID
if in.Claims != nil {
in, out := &in.Claims, &out.Claims
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimsItem.
func (in *ResourcePoolClaimsItem) DeepCopy() *ResourcePoolClaimsItem {
if in == nil {
return nil
}
out := new(ResourcePoolClaimsItem)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourcePoolClaimsList) DeepCopyInto(out *ResourcePoolClaimsList) {
{
in := &in
*out = make(ResourcePoolClaimsList, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ResourcePoolClaimsItem)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolClaimsList.
func (in ResourcePoolClaimsList) DeepCopy() ResourcePoolClaimsList {
if in == nil {
return nil
}
out := new(ResourcePoolClaimsList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolList) DeepCopyInto(out *ResourcePoolList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourcePool, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolList.
func (in *ResourcePoolList) DeepCopy() *ResourcePoolList {
if in == nil {
return nil
}
out := new(ResourcePoolList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourcePoolList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourcePoolNamespaceClaimsStatus) DeepCopyInto(out *ResourcePoolNamespaceClaimsStatus) {
{
in := &in
*out = make(ResourcePoolNamespaceClaimsStatus, len(*in))
for key, val := range *in {
var outVal []*ResourcePoolClaimsItem
if val == nil {
(*out)[key] = nil
} else {
inVal := (*in)[key]
in, out := &inVal, &outVal
*out = make(ResourcePoolClaimsList, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ResourcePoolClaimsItem)
(*in).DeepCopyInto(*out)
}
}
}
(*out)[key] = outVal
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolNamespaceClaimsStatus.
func (in ResourcePoolNamespaceClaimsStatus) DeepCopy() ResourcePoolNamespaceClaimsStatus {
if in == nil {
return nil
}
out := new(ResourcePoolNamespaceClaimsStatus)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolQuotaStatus) DeepCopyInto(out *ResourcePoolQuotaStatus) {
*out = *in
if in.Hard != nil {
in, out := &in.Hard, &out.Hard
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Claimed != nil {
in, out := &in.Claimed, &out.Claimed
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Available != nil {
in, out := &in.Available, &out.Available
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolQuotaStatus.
func (in *ResourcePoolQuotaStatus) DeepCopy() *ResourcePoolQuotaStatus {
if in == nil {
return nil
}
out := new(ResourcePoolQuotaStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolSpec) DeepCopyInto(out *ResourcePoolSpec) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]api.NamespaceSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Quota.DeepCopyInto(&out.Quota)
if in.Defaults != nil {
in, out := &in.Defaults, &out.Defaults
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
in.Config.DeepCopyInto(&out.Config)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolSpec.
func (in *ResourcePoolSpec) DeepCopy() *ResourcePoolSpec {
if in == nil {
return nil
}
out := new(ResourcePoolSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolSpecConfiguration) DeepCopyInto(out *ResourcePoolSpecConfiguration) {
*out = *in
if in.DefaultsAssignZero != nil {
in, out := &in.DefaultsAssignZero, &out.DefaultsAssignZero
*out = new(bool)
**out = **in
}
if in.OrderedQueue != nil {
in, out := &in.OrderedQueue, &out.OrderedQueue
*out = new(bool)
**out = **in
}
if in.DeleteBoundResources != nil {
in, out := &in.DeleteBoundResources, &out.DeleteBoundResources
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolSpecConfiguration.
func (in *ResourcePoolSpecConfiguration) DeepCopy() *ResourcePoolSpecConfiguration {
if in == nil {
return nil
}
out := new(ResourcePoolSpecConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePoolStatus) DeepCopyInto(out *ResourcePoolStatus) {
*out = *in
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Claims != nil {
in, out := &in.Claims, &out.Claims
*out = make(ResourcePoolNamespaceClaimsStatus, len(*in))
for key, val := range *in {
var outVal []*ResourcePoolClaimsItem
if val == nil {
(*out)[key] = nil
} else {
inVal := (*in)[key]
in, out := &inVal, &outVal
*out = make(ResourcePoolClaimsList, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ResourcePoolClaimsItem)
(*in).DeepCopyInto(*out)
}
}
}
(*out)[key] = outVal
}
}
in.Allocation.DeepCopyInto(&out.Allocation)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePoolStatus.
func (in *ResourcePoolStatus) DeepCopy() *ResourcePoolStatus {
if in == nil {
return nil
}
out := new(ResourcePoolStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = *in


@@ -134,6 +134,10 @@ Here the values you can override:
| ports | list | `[]` | Set additional ports for the deployment |
| priorityClassName | string | `""` | Set the priority class name of the Capsule pod |
| proxy.enabled | bool | `false` | Enable Installation of Capsule Proxy |
| rbac.resourcepoolclaims.create | bool | `false` | |
| rbac.resourcepoolclaims.labels."rbac.authorization.k8s.io/aggregate-to-admin" | string | `"true"` | |
| rbac.resources.create | bool | `false` | |
| rbac.resources.labels."rbac.authorization.k8s.io/aggregate-to-admin" | string | `"true"` | |
| replicaCount | int | `1` | Set the replica count for capsule pod |
| securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true}` | Set the securityContext for the Capsule container |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
@@ -234,6 +238,16 @@ Here the values you can override:
| webhooks.hooks.pods.failurePolicy | string | `"Fail"` | |
| webhooks.hooks.pods.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.hooks.pods.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
| webhooks.hooks.resourcepools.claims.failurePolicy | string | `"Fail"` | |
| webhooks.hooks.resourcepools.claims.matchPolicy | string | `"Equivalent"` | |
| webhooks.hooks.resourcepools.claims.namespaceSelector | object | `{}` | |
| webhooks.hooks.resourcepools.claims.objectSelector | object | `{}` | |
| webhooks.hooks.resourcepools.claims.reinvocationPolicy | string | `"Never"` | |
| webhooks.hooks.resourcepools.pools.failurePolicy | string | `"Fail"` | |
| webhooks.hooks.resourcepools.pools.matchPolicy | string | `"Equivalent"` | |
| webhooks.hooks.resourcepools.pools.namespaceSelector | object | `{}` | |
| webhooks.hooks.resourcepools.pools.objectSelector | object | `{}` | |
| webhooks.hooks.resourcepools.pools.reinvocationPolicy | string | `"Never"` | |
| webhooks.hooks.services.failurePolicy | string | `"Fail"` | |
| webhooks.hooks.services.namespaceSelector.matchExpressions[0].key | string | `"capsule.clastix.io/tenant"` | |
| webhooks.hooks.services.namespaceSelector.matchExpressions[0].operator | string | `"Exists"` | |
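The new RBAC values can be enabled together; a sketch of the corresponding values.yaml fragment, mirroring the defaults documented above:

rbac:
  resourcepoolclaims:
    create: true               # chart default is false
    labels:
      rbac.authorization.k8s.io/aggregate-to-admin: "true"
  resources:
    create: true               # chart default is false
    labels:
      rbac.authorization.k8s.io/aggregate-to-admin: "true"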


@@ -0,0 +1,158 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.18.0
name: resourcepoolclaims.capsule.clastix.io
spec:
group: capsule.clastix.io
names:
kind: ResourcePoolClaim
listKind: ResourcePoolClaimList
plural: resourcepoolclaims
singular: resourcepoolclaim
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: The ResourcePool being claimed from
jsonPath: .status.pool.name
name: Pool
type: string
- description: Status for claim
jsonPath: .status.condition.type
name: Status
type: string
- description: Reason for status
jsonPath: .status.condition.reason
name: Reason
type: string
- description: Condition Message
jsonPath: .status.condition.message
name: Message
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta2
schema:
openAPIV3Schema:
description: ResourcePoolClaim is the Schema for the resourcepoolclaims API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
properties:
claim:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Amount which should be claimed for the resourcequota
type: object
pool:
description: |-
If there's the possibility to claim from multiple ResourcePools,
you must be specific about which one you want to claim resources from.
Once bound to a ResourcePool, this field is immutable
type: string
required:
- claim
- pool
type: object
status:
description: ResourcePoolClaimStatus defines the observed state of ResourcePoolClaim.
properties:
condition:
description: Condition for this resource claim
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
pool:
description: Reference to the ResourcePool being claimed from
properties:
name:
description: Name
maxLength: 253
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
namespace:
description: Namespace
maxLength: 253
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
uid:
description: UID of the tracked Tenant to pin point tracking
type: string
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}


@@ -0,0 +1,308 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.18.0
name: resourcepools.capsule.clastix.io
spec:
group: capsule.clastix.io
names:
kind: ResourcePool
listKind: ResourcePoolList
plural: resourcepools
shortNames:
- quotapool
singular: resourcepool
scope: Cluster
versions:
- additionalPrinterColumns:
- description: The total amount of Claims bound
jsonPath: .status.claimCount
name: Claims
type: integer
- description: The total amount of Namespaces considered
jsonPath: .status.namespaceCount
name: Namespaces
type: integer
- description: Age
jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta2
schema:
openAPIV3Schema:
description: |-
A ResourcePool allows you to define a set of resources as known from ResourceQuotas. ResourcePools are
defined at cluster scope and should be administrated by cluster administrators. However, they provide an
interface through which cluster administrators can define from which namespaces the resources of a
ResourcePool may be claimed. Claiming is done via a namespaced CRD called ResourcePoolClaim; it is then
up to the group of users within these namespaces to manage the resources they consume per namespace.
Each ResourcePool provisions a ResourceQuota into all selected namespaces. ResourcePoolClaims, once
assigned to the ResourcePool, stack additional resources on top of that ResourceQuota, based on the
namespace the ResourcePoolClaim was made from.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ResourcePoolSpec.
properties:
config:
default: {}
description: Additional Configuration
properties:
defaultsZero:
default: false
description: With this option all resources which can be allocated
are set to 0 for the resourcequota defaults.
type: boolean
deleteBoundResources:
default: false
description: |-
When a resourcepool is deleted, the resourceclaims bound to it are disassociated from the resourcepool but not deleted.
By Enabling this option, the resourceclaims will be deleted when the resourcepool is deleted, if they are in bound state.
type: boolean
orderedQueue:
default: false
description: |-
Claims are queued whenever they are allocated to a pool. A pool tries to allocate claims in order,
based on their creation date. By default, however, if a claim requests too many resources it is put
into the queue, while a later claim that still fits into the available resources may claim them,
even though its priority is lower.
Enabling this option enforces the order: the creation timestamp matters, and once a claim is queued,
no lower-priority claim can take the same resources.
type: boolean
type: object
defaults:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
The defaults given for each namespace; defaults are not counted towards the total allocation.
When you use claims it's recommended to provision defaults, as they prevent the scheduling of any resources until quota is claimed.
type: object
quota:
description: Define the resourcequota served by this resourcepool.
properties:
hard:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
hard is the set of desired hard limits for each named resource.
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
type: object
scopeSelector:
description: |-
scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
but expressed using ScopeSelectorOperator in combination with possible values.
For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
properties:
matchExpressions:
description: A list of scope selector requirements by scope
of the resources.
items:
description: |-
A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
that relates the scope name and values.
properties:
operator:
description: |-
Represents a scope's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist.
type: string
scopeName:
description: The name of the scope that the selector
applies to.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- operator
- scopeName
type: object
type: array
x-kubernetes-list-type: atomic
type: object
x-kubernetes-map-type: atomic
scopes:
description: |-
A collection of filters that must match each object tracked by a quota.
If not specified, the quota matches all objects.
items:
description: A ResourceQuotaScope defines a filter that must
match each object tracked by a quota
type: string
type: array
x-kubernetes-list-type: atomic
type: object
selectors:
description: Selector to match the namespaces that should be managed
by the ResourcePool
items:
description: Selector for resources and their labels or selecting
origin namespaces
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: array
required:
- quota
type: object
status:
description: ResourcePoolStatus defines the observed state of ResourcePool.
properties:
allocation:
description: Tracks the usage of claimed resources against what has been
granted by the pool
properties:
available:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Used to track the usage of the resource in the pool
(diff hard - claimed). May be used for further automation
type: object
hard:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
Hard is the set of enforced hard limits for each named resource.
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
type: object
used:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Used is the current observed total usage of the resource
in the namespace.
type: object
type: object
claimCount:
default: 0
description: Amount of claims
type: integer
claims:
additionalProperties:
items:
description: ResourcePoolClaimsItem defines a claim entry tracked
in the pool status.
properties:
claims:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Claimed resources
type: object
name:
description: Name
maxLength: 253
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
namespace:
description: Namespace
maxLength: 253
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
uid:
              description: UID of the tracked Tenant, used to pinpoint tracking
type: string
type: object
type: array
description: Tracks the quotas for the Resource.
type: object
namespaceCount:
default: 0
description: How many namespaces are considered
type: integer
namespaces:
description: Namespaces which are considered for claims
items:
type: string
type: array
type: object
type: object
served: true
storage: true
subresources:
status: {}
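
For orientation, the schema above admits a manifest along these lines — a minimal sketch, assuming the capsule.clastix.io/v1beta2 API version and tenant-labelled target namespaces (names and quantities are illustrative):

apiVersion: capsule.clastix.io/v1beta2
kind: GlobalResourceQuota
metadata:
  name: example-quota              # hypothetical name
spec:
  selectors:
    - matchLabels:
        capsule.clastix.io/tenant: solar   # hypothetical tenant label value
  quota:
    hard:
      requests.cpu: "8"
      limits.memory: 16Gi

The controller then fills status.allocation (hard, used, and available per resource) together with claimCount, namespaceCount, and the tracked claims per namespace.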

View File

@@ -135,5 +135,57 @@ webhooks:
    scope: '*'
  sideEffects: NoneOnDryRun
  timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.resourcepools.pools }}
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
{{- include "capsule.webhooks.service" (dict "path" "/resourcepool/mutating" "ctx" $) | nindent 4 }}
failurePolicy: {{ .failurePolicy }}
matchPolicy: {{ .matchPolicy }}
name: resourcepools.projectcapsule.dev
namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }}
objectSelector: {{ toYaml .objectSelector | nindent 4 }}
reinvocationPolicy: {{ .reinvocationPolicy }}
rules:
- apiGroups:
- "capsule.clastix.io"
apiVersions:
- "*"
operations:
- CREATE
- UPDATE
resources:
- resourcepools
scope: '*'
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.resourcepools.claims }}
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
{{- include "capsule.webhooks.service" (dict "path" "/resourcepool/claim/mutating" "ctx" $) | nindent 4 }}
failurePolicy: {{ .failurePolicy }}
matchPolicy: {{ .matchPolicy }}
name: resourcepoolclaims.projectcapsule.dev
namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }}
objectSelector: {{ toYaml .objectSelector | nindent 4 }}
reinvocationPolicy: {{ .reinvocationPolicy }}
rules:
- apiGroups:
- "capsule.clastix.io"
apiVersions:
- "*"
operations:
- CREATE
- UPDATE
resources:
- resourcepoolclaims
scope: '*'
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.mutatingWebhooksTimeoutSeconds }}
{{- end }}
{{- end }}
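
The rendered hooks are driven entirely by chart values. A hedged override example — the key paths follow the template above; Ignore is a standard admissionregistration failure policy, and its use here is only an assumption for a cautious rollout:

webhooks:
  hooks:
    resourcepools:
      pools:
        failurePolicy: Ignore
      claims:
        failurePolicy: Fail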

View File

@@ -0,0 +1,24 @@
{{- if $.Values.rbac.resourcepoolclaims.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "capsule.fullname" $ }}-resourcepoolclaims
labels:
{{- toYaml $.Values.rbac.resourcepoolclaims.labels | nindent 4 }}
rules:
- apiGroups: ["capsule.clastix.io"]
resources: ["resourcepoolclaims"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
{{- end }}
{{- if $.Values.rbac.resources.create }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "capsule.fullname" $ }}-resources
labels:
{{- toYaml $.Values.rbac.resources.labels | nindent 4 }}
rules:
- apiGroups: ["capsule.clastix.io"]
resources: ["tenantresources"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
{{- end }}
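
With create: true and the default labels, the first block renders roughly the following ClusterRole — a sketch assuming a release fullname of capsule. The aggregate-to-admin label folds these rules into the built-in admin ClusterRole, which is how tenant owners gain these verbs on claims inside their namespaces:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: capsule-resourcepoolclaims
  labels:
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
  - apiGroups: ["capsule.clastix.io"]
    resources: ["resourcepoolclaims"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]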

View File

@@ -274,7 +274,7 @@ webhooks:
  sideEffects: None
  timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.tenants }}
- admissionReviewVersions:
  - v1
  - v1beta1
@@ -299,7 +299,57 @@ webhooks:
    scope: '*'
  sideEffects: None
  timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.resourcepools.pools }}
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
{{- include "capsule.webhooks.service" (dict "path" "/resourcepool/validating" "ctx" $) | nindent 4 }}
failurePolicy: {{ .failurePolicy }}
matchPolicy: {{ .matchPolicy }}
name: resourcepools.projectcapsule.dev
namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }}
objectSelector: {{ toYaml .objectSelector | nindent 4 }}
rules:
- apiGroups:
- "capsule.clastix.io"
apiVersions:
- "*"
operations:
- CREATE
- UPDATE
resources:
- resourcepools
scope: '*'
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.resourcepools.claims }}
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
{{- include "capsule.webhooks.service" (dict "path" "/resourcepool/claim/validating" "ctx" $) | nindent 4 }}
failurePolicy: {{ .failurePolicy }}
matchPolicy: {{ .matchPolicy }}
name: resourcepoolclaims.projectcapsule.dev
namespaceSelector: {{ toYaml .namespaceSelector | nindent 4 }}
objectSelector: {{ toYaml .objectSelector | nindent 4 }}
rules:
- apiGroups:
- "capsule.clastix.io"
apiVersions:
- "*"
operations:
- CREATE
- UPDATE
resources:
- resourcepoolclaims
scope: '*'
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- with .Values.webhooks.hooks.customresources }}
- admissionReviewVersions:
  - v1
@@ -332,4 +382,4 @@ webhooks:
  sideEffects: None
  timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- end }}

View File

@@ -375,6 +375,43 @@
      },
      "type": "object"
    },
"rbac": {
"properties": {
"resourcepoolclaims": {
"properties": {
"create": {
"type": "boolean"
},
"labels": {
"properties": {
"rbac.authorization.k8s.io/aggregate-to-admin": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"resources": {
"properties": {
"create": {
"type": "boolean"
},
"labels": {
"properties": {
"rbac.authorization.k8s.io/aggregate-to-admin": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
}
},
"type": "object"
},
"replicaCount": { "replicaCount": {
"type": "integer" "type": "integer"
}, },
@@ -817,6 +854,55 @@
      },
      "type": "object"
    },
"resourcepools": {
"properties": {
"claims": {
"properties": {
"failurePolicy": {
"type": "string"
},
"matchPolicy": {
"type": "string"
},
"namespaceSelector": {
"properties": {},
"type": "object"
},
"objectSelector": {
"properties": {},
"type": "object"
},
"reinvocationPolicy": {
"type": "string"
}
},
"type": "object"
},
"pools": {
"properties": {
"failurePolicy": {
"type": "string"
},
"matchPolicy": {
"type": "string"
},
"namespaceSelector": {
"properties": {},
"type": "object"
},
"objectSelector": {
"properties": {},
"type": "object"
},
"reinvocationPolicy": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"services": { "services": {
"properties": { "properties": {
"failurePolicy": { "failurePolicy": {

View File

@@ -76,6 +76,17 @@ proxy:
  # -- Enable Installation of Capsule Proxy
  enabled: false
# These are ClusterRoles which grant permissions for Capsule CRDs to Tenant Owners
rbac:
resources:
create: false
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
resourcepoolclaims:
create: false
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
# Manager Options
manager:
@@ -265,6 +276,19 @@ webhooks:
  # Hook Configuration
  hooks:
resourcepools:
pools:
namespaceSelector: {}
objectSelector: {}
reinvocationPolicy: Never
matchPolicy: Equivalent
failurePolicy: Fail
claims:
namespaceSelector: {}
objectSelector: {}
reinvocationPolicy: Never
matchPolicy: Equivalent
failurePolicy: Fail
    namespaceOwnerReference:
      failurePolicy: Fail
    customresources:
@@ -353,6 +377,7 @@ webhooks:
        - key: capsule.clastix.io/tenant
          operator: Exists
# ServiceMonitor
serviceMonitor:
  # -- Enable ServiceMonitor
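
Both RBAC blocks above default to create: false; a minimal values override to opt in, using the keys introduced above:

rbac:
  resourcepoolclaims:
    create: true
  resources:
    create: true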

View File

@@ -35,12 +35,14 @@ import (
podlabelscontroller "github.com/projectcapsule/capsule/controllers/pod"
"github.com/projectcapsule/capsule/controllers/pv"
rbaccontroller "github.com/projectcapsule/capsule/controllers/rbac"
"github.com/projectcapsule/capsule/controllers/resourcepools"
"github.com/projectcapsule/capsule/controllers/resources" "github.com/projectcapsule/capsule/controllers/resources"
servicelabelscontroller "github.com/projectcapsule/capsule/controllers/servicelabels" servicelabelscontroller "github.com/projectcapsule/capsule/controllers/servicelabels"
tenantcontroller "github.com/projectcapsule/capsule/controllers/tenant" tenantcontroller "github.com/projectcapsule/capsule/controllers/tenant"
tlscontroller "github.com/projectcapsule/capsule/controllers/tls" tlscontroller "github.com/projectcapsule/capsule/controllers/tls"
"github.com/projectcapsule/capsule/pkg/configuration" "github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/indexer" "github.com/projectcapsule/capsule/pkg/indexer"
"github.com/projectcapsule/capsule/pkg/metrics"
"github.com/projectcapsule/capsule/pkg/webhook" "github.com/projectcapsule/capsule/pkg/webhook"
"github.com/projectcapsule/capsule/pkg/webhook/defaults" "github.com/projectcapsule/capsule/pkg/webhook/defaults"
"github.com/projectcapsule/capsule/pkg/webhook/gateway" "github.com/projectcapsule/capsule/pkg/webhook/gateway"
@@ -51,6 +53,7 @@ import (
"github.com/projectcapsule/capsule/pkg/webhook/node" "github.com/projectcapsule/capsule/pkg/webhook/node"
"github.com/projectcapsule/capsule/pkg/webhook/pod" "github.com/projectcapsule/capsule/pkg/webhook/pod"
"github.com/projectcapsule/capsule/pkg/webhook/pvc" "github.com/projectcapsule/capsule/pkg/webhook/pvc"
"github.com/projectcapsule/capsule/pkg/webhook/resourcepool"
"github.com/projectcapsule/capsule/pkg/webhook/route" "github.com/projectcapsule/capsule/pkg/webhook/route"
"github.com/projectcapsule/capsule/pkg/webhook/service" "github.com/projectcapsule/capsule/pkg/webhook/service"
"github.com/projectcapsule/capsule/pkg/webhook/tenant" "github.com/projectcapsule/capsule/pkg/webhook/tenant"
@@ -195,6 +198,7 @@ func main() {
if err = (&tenantcontroller.Manager{
RESTConfig: manager.GetConfig(),
Client: manager.GetClient(),
Metrics: metrics.MustMakeTenantRecorder(),
Log: ctrl.Log.WithName("controllers").WithName("Tenant"),
Recorder: manager.GetEventRecorderFor("tenant-controller"),
}).SetupWithManager(manager); err != nil {
@@ -236,6 +240,10 @@ func main() {
route.CustomResources(tenant.ResourceCounterHandler(manager.GetClient())),
route.Gateway(gateway.Class(cfg)),
route.Defaults(defaults.Handler(cfg, kubeVersion)),
route.ResourcePoolMutation(resourcepool.PoolMutationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepool"))),
route.ResourcePoolValidation(resourcepool.PoolValidationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepool"))),
route.ResourcePoolClaimMutation(resourcepool.ClaimMutationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepoolclaims"))),
route.ResourcePoolClaimValidation(resourcepool.ClaimValidationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepoolclaims"))),
)
nodeWebhookSupported, _ := utils.NodeWebhookSupported(kubeVersion)
@@ -304,6 +312,15 @@ func main() {
os.Exit(1)
}
if err := resourcepools.Add(
ctrl.Log.WithName("controllers").WithName("ResourcePools"),
manager,
manager.GetEventRecorderFor("pools-ctrl"),
); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "resourcepools")
os.Exit(1)
}
setupLog.Info("starting manager") setupLog.Info("starting manager")
if err = manager.Start(ctx); err != nil { if err = manager.Start(ctx); err != nil {

View File

@@ -0,0 +1,294 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package resourcepools
import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/meta"
"github.com/projectcapsule/capsule/pkg/metrics"
)
type resourceClaimController struct {
client.Client
metrics *metrics.ClaimRecorder
log logr.Logger
recorder record.EventRecorder
}
func (r *resourceClaimController) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&capsulev1beta2.ResourcePoolClaim{}).
Watches(
&capsulev1beta2.ResourcePool{},
handler.EnqueueRequestsFromMapFunc(r.claimsWithoutPoolFromNamespaces),
builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
).
Complete(r)
}
func (r resourceClaimController) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
log := r.log.WithValues("Request.Name", request.Name)
instance := &capsulev1beta2.ResourcePoolClaim{}
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
if apierrors.IsNotFound(err) {
log.Info("Request object not found, could have been deleted after reconcile request")
r.metrics.DeleteClaimMetric(request.Name)
return reconcile.Result{}, nil
}
log.Error(err, "Error reading the object")
return
}
// Ensuring the Quota Status
err = r.reconcile(ctx, log, instance)
// Emit a Metric in any case
r.metrics.RecordClaimCondition(instance)
return ctrl.Result{}, err
}
// Triggers claims from a namespace which are not yet allocated,
// whenever a ResourcePool updates its status.
func (r *resourceClaimController) claimsWithoutPoolFromNamespaces(ctx context.Context, obj client.Object) []reconcile.Request {
pool, ok := obj.(*capsulev1beta2.ResourcePool)
if !ok {
return nil
}
var requests []reconcile.Request
for _, ns := range pool.Status.Namespaces {
claimList := &capsulev1beta2.ResourcePoolClaimList{}
if err := r.List(ctx, claimList, client.InNamespace(ns)); err != nil {
r.log.Error(err, "Failed to list claims in namespace", "namespace", ns)
continue
}
for _, claim := range claimList.Items {
if claim.Status.Pool.UID == "" {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: claim.Namespace,
Name: claim.Name,
},
})
}
}
}
return requests
}
// This controller is responsible for assigning Claims to ResourcePools.
// Everything else is handled by the ResourcePool controller.
func (r resourceClaimController) reconcile(
ctx context.Context,
log logr.Logger,
claim *capsulev1beta2.ResourcePoolClaim,
) (err error) {
pool, err := r.evaluateResourcePool(ctx, claim)
if err != nil {
claim.Status.Pool = api.StatusNameUID{}
cond := meta.NewAssignedCondition(claim)
cond.Status = metav1.ConditionFalse
cond.Reason = meta.FailedReason
cond.Message = err.Error()
return updateStatusAndEmitEvent(
ctx,
r.Client,
r.recorder,
claim,
cond,
)
}
return r.allocateResourcePool(ctx, log, claim, pool)
}
// Verify a Pool can be allocated.
func (r resourceClaimController) evaluateResourcePool(
ctx context.Context,
claim *capsulev1beta2.ResourcePoolClaim,
) (pool *capsulev1beta2.ResourcePool, err error) {
poolName := claim.Spec.Pool
if poolName == "" {
err = fmt.Errorf("no pool reference was defined")
return pool, err
}
pool = &capsulev1beta2.ResourcePool{}
if err := r.Get(ctx, client.ObjectKey{
Name: poolName,
}, pool); err != nil {
return nil, err
}
if !pool.DeletionTimestamp.IsZero() {
return nil, fmt.Errorf(
"resourcepool not available",
)
}
allowed := false
for _, ns := range pool.Status.Namespaces {
if ns == claim.GetNamespace() {
allowed = true
break
}
}
if !allowed {
return nil, fmt.Errorf(
"resourcepool not available",
)
}
// Validates if Resources can be allocated in the first place
for resourceName := range claim.Spec.ResourceClaims {
_, exists := pool.Status.Allocation.Hard[resourceName]
if !exists {
return nil, fmt.Errorf(
"resource %s is not available in pool %s",
resourceName,
pool.Name,
)
}
}
return pool, err
}
func (r resourceClaimController) allocateResourcePool(
ctx context.Context,
log logr.Logger,
cl *capsulev1beta2.ResourcePoolClaim,
pool *capsulev1beta2.ResourcePool,
) (err error) {
allocate := api.StatusNameUID{
Name: api.Name(pool.GetName()),
UID: pool.GetUID(),
}
if !meta.HasLooseOwnerReference(cl, pool) {
log.V(5).Info("adding ownerreference for", "pool", pool.Name)
patch := client.MergeFrom(cl.DeepCopy())
if err := meta.SetLooseOwnerReference(cl, pool, r.Scheme()); err != nil {
return err
}
if err := r.Patch(ctx, cl, patch); err != nil {
return err
}
}
if cl.Status.Pool.Name == allocate.Name &&
cl.Status.Pool.UID == allocate.UID {
return nil
}
cond := meta.NewAssignedCondition(cl)
cond.Status = metav1.ConditionTrue
cond.Reason = meta.SucceededReason
// Set claim pool in status and condition
cl.Status = capsulev1beta2.ResourcePoolClaimStatus{
Pool: allocate,
Condition: cond,
}
// Update status in a separate call
if err := r.Client.Status().Update(ctx, cl); err != nil {
return err
}
return nil
}
// Update the Status of a claim and emit an event if Status changed.
func updateStatusAndEmitEvent(
ctx context.Context,
c client.Client,
recorder record.EventRecorder,
claim *capsulev1beta2.ResourcePoolClaim,
condition metav1.Condition,
) (err error) {
if claim.Status.Condition.Type == condition.Type &&
claim.Status.Condition.Status == condition.Status &&
claim.Status.Condition.Reason == condition.Reason &&
claim.Status.Condition.Message == condition.Message {
return nil
}
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
current := &capsulev1beta2.ResourcePoolClaim{}
if err := c.Get(ctx, client.ObjectKeyFromObject(claim), current); err != nil {
return fmt.Errorf("failed to refetch instance before update: %w", err)
}
current.Status.Condition = condition
return c.Status().Update(ctx, current)
})
claim.Status.Condition = condition
if err != nil {
return err
}
eventType := corev1.EventTypeNormal
if claim.Status.Condition.Status == metav1.ConditionFalse {
eventType = corev1.EventTypeWarning
}
recorder.AnnotatedEventf(
claim,
map[string]string{
"Status": string(claim.Status.Condition.Status),
"Type": claim.Status.Condition.Type,
},
eventType,
claim.Status.Condition.Reason,
claim.Status.Condition.Message,
)
return
}
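
In concrete terms, the contract this controller implements is: a claim names a pool and the quantities it wants, and the controller pins status.pool and sets the Assigned condition. A hedged sketch of such a claim — kind and group/version are assumed from the Go types, and the spec key for the requested quantities mirrors the Spec.ResourceClaims field, whose exact serialization may differ:

apiVersion: capsule.clastix.io/v1beta2
kind: ResourcePoolClaim
metadata:
  name: web-claim              # hypothetical
  namespace: solar-prod        # must appear in the pool's status.namespaces
spec:
  pool: shared-pool            # matches claim.Spec.Pool above
  resourceClaims:              # assumed serialization of Spec.ResourceClaims
    requests.cpu: "2"
    requests.memory: 4Gi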

View File

@@ -0,0 +1,40 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package resourcepools
import (
"fmt"
"github.com/go-logr/logr"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/projectcapsule/capsule/pkg/metrics"
)
func Add(
log logr.Logger,
mgr manager.Manager,
recorder record.EventRecorder,
) (err error) {
if err = (&resourcePoolController{
Client: mgr.GetClient(),
log: log.WithName("Pools"),
recorder: recorder,
metrics: metrics.MustMakeResourcePoolRecorder(),
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create pool controller: %w", err)
}
if err = (&resourceClaimController{
Client: mgr.GetClient(),
log: log.WithName("Claims"),
recorder: recorder,
metrics: metrics.MustMakeClaimRecorder(),
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create claim controller: %w", err)
}
return nil
}

View File

@@ -0,0 +1,771 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package resourcepools
import (
"context"
"fmt"
"sort"
"strings"
"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/meta"
"github.com/projectcapsule/capsule/pkg/metrics"
"github.com/projectcapsule/capsule/pkg/utils"
)
type resourcePoolController struct {
client.Client
metrics *metrics.ResourcePoolRecorder
log logr.Logger
recorder record.EventRecorder
}
func (r *resourcePoolController) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&capsulev1beta2.ResourcePool{}).
Owns(&corev1.ResourceQuota{}).
Watches(&capsulev1beta2.ResourcePoolClaim{},
handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &capsulev1beta2.ResourcePool{}),
).
Watches(&corev1.Namespace{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request {
// Fetch all ResourcePool objects
grqList := &capsulev1beta2.ResourcePoolList{}
if err := mgr.GetClient().List(ctx, grqList); err != nil {
r.log.Error(err, "Failed to list ResourcePools objects")
return nil
}
// Enqueue a reconcile request for each ResourcePool
var requests []reconcile.Request
for _, grq := range grqList.Items {
requests = append(requests, reconcile.Request{
NamespacedName: client.ObjectKeyFromObject(&grq),
})
}
return requests
}),
).
Complete(r)
}
func (r resourcePoolController) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
log := r.log.WithValues("Request.Name", request.Name)
// Fetch the ResourcePool instance
instance := &capsulev1beta2.ResourcePool{}
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
if apierrors.IsNotFound(err) {
log.Info("Request object not found, could have been deleted after reconcile request")
r.metrics.DeleteResourcePoolMetric(request.Name)
return reconcile.Result{}, nil
}
log.Error(err, "Error reading the object")
return
}
// ResourceQuota Reconciliation
reconcileErr := r.reconcile(ctx, log, instance)
r.metrics.ResourceUsageMetrics(instance)
// Always Post Status
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
current := &capsulev1beta2.ResourcePool{}
if err := r.Get(ctx, client.ObjectKeyFromObject(instance), current); err != nil {
return fmt.Errorf("failed to refetch instance before update: %w", err)
}
current.Status = instance.Status
return r.Client.Status().Update(ctx, current)
})
if reconcileErr != nil || err != nil {
log.V(3).Info("Failed to reconcile ResourcePool", "reconcileError", reconcileErr, "statusError", err)
if reconcileErr != nil {
return ctrl.Result{}, reconcileErr
}
return ctrl.Result{}, err
}
err = r.finalize(ctx, instance)
return ctrl.Result{}, err
}
func (r *resourcePoolController) finalize(
ctx context.Context,
pool *capsulev1beta2.ResourcePool,
) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
// Re-fetch latest version of the object
latest := &capsulev1beta2.ResourcePool{}
if err := r.Get(ctx, client.ObjectKeyFromObject(pool), latest); err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return err
}
changed := false
// Case: all claims are gone, remove finalizer
if latest.Status.ClaimSize == 0 && controllerutil.ContainsFinalizer(latest, meta.ControllerFinalizer) {
controllerutil.RemoveFinalizer(latest, meta.ControllerFinalizer)
changed = true
}
// Case: claims still exist, add finalizer if not already present
if latest.Status.ClaimSize > 0 && !controllerutil.ContainsFinalizer(latest, meta.ControllerFinalizer) {
controllerutil.AddFinalizer(latest, meta.ControllerFinalizer)
changed = true
}
if changed {
return r.Update(ctx, latest)
}
return nil
})
}
func (r *resourcePoolController) reconcile(
ctx context.Context,
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
) (err error) {
r.handlePoolHardResources(pool)
namespaces, err := r.gatherMatchingNamespaces(ctx, log, pool)
if err != nil {
log.Error(err, "Can not get matching namespaces")
return err
}
currentNamespaces := make(map[string]struct{}, len(namespaces))
for _, ns := range namespaces {
currentNamespaces[ns.Name] = struct{}{}
}
claims, err := r.gatherMatchingClaims(ctx, log, pool, currentNamespaces)
if err != nil {
log.Error(err, "Can not get matching namespaces")
return err
}
log.V(5).Info("Collected assigned claims", "count", len(claims))
if err := r.garbageCollection(ctx, log, pool, claims, currentNamespaces); err != nil {
log.Error(err, "Failed to garbage collect ResourceQuotas")
return err
}
pool.AssignNamespaces(namespaces)
// Sort by creation timestamp (oldest first)
sort.Slice(claims, func(i, j int) bool {
return claims[i].CreationTimestamp.Before(&claims[j].CreationTimestamp)
})
// Keeps track of resources which have already been exhausted by previous claims.
// This is only required when OrderedQueue is active.
queuedResourcesMap := make(map[string]resource.Quantity)
// Iterate over the claims in order (oldest first).
for _, claim := range claims {
log.Info("Found claim", "name", claim.Name, "namespace", claim.Namespace, "created", claim.CreationTimestamp)
err = r.reconcileResourceClaim(ctx, log.WithValues("Claim", claim.Name), pool, &claim, queuedResourcesMap)
if err != nil {
log.Error(err, "Failed to reconcile ResourceQuotaClaim", "claim", claim.Name)
}
}
pool.CalculateClaimedResources()
pool.AssignClaims()
return r.syncResourceQuotas(ctx, r.Client, pool, namespaces)
}
// Reconciles a single ResourceClaim.
func (r *resourcePoolController) reconcileResourceClaim(
ctx context.Context,
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
claim *capsulev1beta2.ResourcePoolClaim,
exhaustion map[string]resource.Quantity,
) (err error) {
t := pool.GetClaimFromStatus(claim)
if t != nil {
// TBD: Future Implementation for Claim Resizing here
return r.handleClaimToPoolBinding(ctx, pool, claim)
}
// Verify if a resource was already exhausted by a previous claim
if *pool.Spec.Config.OrderedQueue {
var queued bool
queued, err = r.handleClaimOrderedExhaustion(
ctx,
claim,
exhaustion,
)
if err != nil {
return err
}
if queued {
log.V(5).Info("Claim is queued", "claim", claim.Name)
return nil
}
}
// Check if Resources can be Assigned (Enough Resources to claim)
exhaustions := r.canClaimWithinNamespace(log, pool, claim)
if len(exhaustions) != 0 {
log.V(5).Info("exhausting resources", "amount", len(exhaustions))
return r.handleClaimResourceExhaustion(
ctx,
pool,
claim,
exhaustions,
exhaustion,
)
}
return r.handleClaimToPoolBinding(ctx, pool, claim)
}
func (r *resourcePoolController) canClaimWithinNamespace(
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
claim *capsulev1beta2.ResourcePoolClaim,
) (res map[string]PoolExhaustionResource) {
claimable := pool.GetAvailableClaimableResources()
log.V(5).Info("claimable resources", "claimable", claimable)
_, namespaceClaimed := pool.GetNamespaceClaims(claim.Namespace)
log.V(5).Info("namespace claimed resources", "claimed", namespaceClaimed)
res = make(map[string]PoolExhaustionResource)
for resourceName, req := range claim.Spec.ResourceClaims {
// Verify if total Quota is available
available, exists := claimable[resourceName]
if !exists || available.IsZero() || available.Cmp(req) < 0 {
log.V(5).Info("not enough resources available", "available", available, "requesting", req)
res[resourceName.String()] = PoolExhaustionResource{
Available: available,
Requesting: req,
Namespace: false,
}
continue
}
}
return
}
// Handles exhaustion when a resource was already declared exhausted in the given map.
func (r *resourcePoolController) handleClaimOrderedExhaustion(
ctx context.Context,
claim *capsulev1beta2.ResourcePoolClaim,
exhaustion map[string]resource.Quantity,
) (queued bool, err error) {
status := make([]string, 0)
for resourceName, qt := range claim.Spec.ResourceClaims {
req, ok := exhaustion[resourceName.String()]
if !ok {
continue
}
line := fmt.Sprintf(
"requested: %s=%s, queued: %s=%s",
resourceName,
qt.String(),
resourceName,
req.String(),
)
status = append(status, line)
}
if len(status) != 0 {
queued = true
cond := meta.NewBoundCondition(claim)
cond.Status = metav1.ConditionFalse
cond.Reason = meta.QueueExhaustedReason
cond.Message = strings.Join(status, "; ")
return queued, updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond)
}
return
}
func (r *resourcePoolController) handleClaimResourceExhaustion(
ctx context.Context,
pool *capsulev1beta2.ResourcePool,
claim *capsulev1beta2.ResourcePoolClaim,
exhaustions map[string]PoolExhaustionResource,
exhaustion map[string]resource.Quantity,
) (err error) {
status := make([]string, 0)
resourceNames := make([]string, 0)
for resourceName := range exhaustions {
resourceNames = append(resourceNames, resourceName)
}
sort.Strings(resourceNames)
for _, resourceName := range resourceNames {
ex := exhaustions[resourceName]
if *pool.Spec.Config.OrderedQueue {
ext, ok := exhaustion[resourceName]
if ok {
ext.Add(ex.Requesting)
} else {
ext = ex.Requesting
}
exhaustion[resourceName] = ext
}
line := fmt.Sprintf(
"requested: %s=%s, available: %s=%s",
resourceName,
ex.Requesting.String(),
resourceName,
ex.Available.String(),
)
status = append(status, line)
}
if len(status) != 0 {
cond := meta.NewBoundCondition(claim)
cond.Status = metav1.ConditionFalse
cond.Reason = meta.PoolExhaustedReason
cond.Message = strings.Join(status, "; ")
return updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond)
}
return err
}
func (r *resourcePoolController) handleClaimToPoolBinding(
ctx context.Context,
pool *capsulev1beta2.ResourcePool,
claim *capsulev1beta2.ResourcePoolClaim,
) (err error) {
cond := meta.NewBoundCondition(claim)
cond.Status = metav1.ConditionTrue
cond.Reason = meta.SucceededReason
cond.Message = "Claimed resources"
if err = updateStatusAndEmitEvent(ctx, r.Client, r.recorder, claim, cond); err != nil {
return
}
pool.AddClaimToStatus(claim)
return
}
// Disassociates a claim from the pool, releasing its loose owner reference.
func (r *resourcePoolController) handleClaimDisassociation(
ctx context.Context,
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
claim *capsulev1beta2.ResourcePoolClaimsItem,
) error {
current := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claim.Name.String(),
Namespace: claim.Namespace.String(),
UID: claim.UID,
},
}
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if err := r.Get(ctx, types.NamespacedName{
Name: claim.Name.String(),
Namespace: claim.Namespace.String(),
}, current); err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return fmt.Errorf("failed to refetch claim before patch: %w", err)
}
if !*pool.Spec.Config.DeleteBoundResources || meta.ReleaseAnnotationTriggers(current) {
patch := client.MergeFrom(current.DeepCopy())
meta.RemoveLooseOwnerReference(current, pool)
meta.ReleaseAnnotationRemove(current)
if err := r.Patch(ctx, current, patch); err != nil {
return fmt.Errorf("failed to patch claim: %w", err)
}
}
current.Status.Pool = api.StatusNameUID{}
if err := r.Client.Status().Update(ctx, current); err != nil {
return fmt.Errorf("failed to update claim status: %w", err)
}
r.recorder.AnnotatedEventf(
current,
map[string]string{
"Status": string(metav1.ConditionFalse),
"Type": meta.NotReadyCondition,
},
corev1.EventTypeNormal,
"Disassociated",
"Claim is disassociated from the pool",
)
return nil
})
if err != nil {
log.Info("Removing owner reference failed", "claim", current.Name, "pool", pool.Name, "error", err)
return err
}
pool.RemoveClaimFromStatus(current)
return nil
}
// Synchronizes resource quotas in all the given namespaces (parallel goroutines).
func (r *resourcePoolController) syncResourceQuotas(
ctx context.Context,
c client.Client,
quota *capsulev1beta2.ResourcePool,
namespaces []corev1.Namespace,
) (err error) {
group := new(errgroup.Group)
for _, ns := range namespaces {
namespace := ns
group.Go(func() error {
return r.syncResourceQuota(ctx, c, quota, namespace)
})
}
return group.Wait()
}
// Synchronizes a single ResourceQuota.
func (r *resourcePoolController) syncResourceQuota(
ctx context.Context,
c client.Client,
pool *capsulev1beta2.ResourcePool,
namespace corev1.Namespace,
) (err error) {
// getting ResourceQuota labels for the mutateFn
var quotaLabel string
if quotaLabel, err = utils.GetTypeLabel(&capsulev1beta2.ResourcePool{}); err != nil {
return err
}
target := &corev1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{
Name: utils.PoolResourceQuotaName(pool),
Namespace: namespace.GetName(),
},
}
if err := c.Get(ctx, types.NamespacedName{Name: target.Name, Namespace: target.Namespace}, target); err != nil && !apierrors.IsNotFound(err) {
return err
}
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
_, retryErr = controllerutil.CreateOrUpdate(ctx, c, target, func() (err error) {
targetLabels := target.GetLabels()
if targetLabels == nil {
targetLabels = map[string]string{}
}
targetLabels[quotaLabel] = pool.Name
target.SetLabels(targetLabels)
target.Spec.Scopes = pool.Spec.Quota.Scopes
target.Spec.ScopeSelector = pool.Spec.Quota.ScopeSelector
// Assign to resourcequota all the claims + defaults
target.Spec.Hard = pool.GetResourceQuotaHardResources(namespace.GetName())
return controllerutil.SetControllerReference(pool, target, c.Scheme())
})
return retryErr
})
if err != nil {
return err
}
return nil
}
// Handles newly allocated resources before they are passed on to the pool itself.
// It does not perform the same verification as the admission webhooks for resourcepools.
func (r *resourcePoolController) handlePoolHardResources(pool *capsulev1beta2.ResourcePool) {
// Drop the metric for any resource that was removed from the spec.
for resourceName := range pool.Status.Allocation.Hard {
if _, ok := pool.Spec.Quota.Hard[resourceName]; !ok {
r.metrics.DeleteResourcePoolSingleResourceMetric(pool.Name, resourceName.String())
}
}
pool.Status.Allocation.Hard = pool.Spec.Quota.Hard
}
// Gets the currently selected namespaces for the ResourcePool.
func (r *resourcePoolController) gatherMatchingNamespaces(
ctx context.Context,
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
) (namespaces []corev1.Namespace, err error) {
// Collect Namespaces (Matching)
namespaces = make([]corev1.Namespace, 0)
seenNamespaces := make(map[string]struct{})
if !pool.DeletionTimestamp.IsZero() {
return
}
for _, selector := range pool.Spec.Selectors {
selected, serr := selector.GetMatchingNamespaces(ctx, r.Client)
if serr != nil {
log.Error(err, "Cannot get matching namespaces")
continue
}
for _, ns := range selected {
if !ns.DeletionTimestamp.IsZero() {
continue
}
if _, exists := seenNamespaces[ns.Name]; exists {
continue
}
seenNamespaces[ns.Name] = struct{}{}
namespaces = append(namespaces, ns)
}
}
return
}
// Gets the currently assigned claims for the ResourcePool.
func (r *resourcePoolController) gatherMatchingClaims(
ctx context.Context,
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
namespaces map[string]struct{},
) (claims []capsulev1beta2.ResourcePoolClaim, err error) {
if !pool.DeletionTimestamp.IsZero() {
return claims, err
}
claimList := &capsulev1beta2.ResourcePoolClaimList{}
if err := r.List(ctx, claimList, client.MatchingFieldsSelector{
Selector: fields.OneTermEqualSelector(".status.pool.uid", string(pool.GetUID())),
}); err != nil {
log.Error(err, "failed to list ResourceQuotaClaims")
return claims, err
}
filteredClaims := make([]capsulev1beta2.ResourcePoolClaim, 0)
for _, claim := range claimList.Items {
if meta.ReleaseAnnotationTriggers(&claim) {
continue
}
if _, ok := namespaces[claim.Namespace]; !ok {
continue
}
filteredClaims = append(filteredClaims, claim)
}
// Sort by creation timestamp (oldest first)
sort.Slice(filteredClaims, func(i, j int) bool {
a := filteredClaims[i]
b := filteredClaims[j]
// First, sort by CreationTimestamp
if !a.CreationTimestamp.Equal(&b.CreationTimestamp) {
return a.CreationTimestamp.Before(&b.CreationTimestamp)
}
// Tiebreaker: if CreationTimestamp is equal (e.g., when two claims are created
// at the same time in GitOps environments or CI/CD pipelines), use the name as a stable secondary sort.
if a.Name != b.Name {
return a.Name < b.Name
}
return a.Namespace < b.Namespace
})
return filteredClaims, nil
}
// Garbage collects ResourceQuotas and disassociates claims for namespaces that left the pool.
func (r *resourcePoolController) garbageCollection(
ctx context.Context,
log logr.Logger,
pool *capsulev1beta2.ResourcePool,
claims []capsulev1beta2.ResourcePoolClaim,
namespaces map[string]struct{},
) error {
activeClaims := make(map[string]struct{}, len(claims))
for _, claim := range claims {
activeClaims[string(claim.UID)] = struct{}{}
}
log.V(5).Info("available items", "namespaces", namespaces, "claims", activeClaims)
namespaceMarkedForGC := make(map[string]bool, len(pool.Status.Namespaces))
for _, ns := range pool.Status.Namespaces {
_, exists := namespaces[ns]
if !exists {
log.V(5).Info("garbage collecting namespace", "namespace", ns)
namespaceMarkedForGC[ns] = true
if err := r.garbageCollectNamespace(ctx, pool, ns); err != nil {
r.log.Error(err, "Failed to garbage collect resource quota", "namespace", ns)
return err
}
}
}
// Disassociate claims whose namespace was garbage collected or which are no longer active
for ns, clms := range pool.Status.Claims {
nsMarked := namespaceMarkedForGC[ns]
for _, cl := range clms {
_, claimActive := activeClaims[string(cl.UID)]
if nsMarked || !claimActive {
log.V(5).Info("Disassociating claim", "claim", cl.Name, "namespace", ns, "uid", cl.UID, "nsGC", nsMarked, "claimGC", claimActive)
cl.Namespace = api.Name(ns)
if err := r.handleClaimDisassociation(ctx, log, pool, cl); err != nil {
r.log.Error(err, "Failed to disassociate claim", "namespace", ns, "uid", cl.UID)
return err
}
}
}
if nsMarked || len(pool.Status.Claims[ns]) == 0 {
delete(pool.Status.Claims, ns)
}
}
// Recalculate the usage at the end, since it can only decrease here.
pool.CalculateClaimedResources()
return nil
}
// Attempts to garbage collect a ResourceQuota resource.
func (r *resourcePoolController) garbageCollectNamespace(
ctx context.Context,
pool *capsulev1beta2.ResourcePool,
namespace string,
) error {
r.metrics.DeleteResourcePoolNamespaceMetric(pool.Name, namespace)
// Check if the namespace still exists
ns := &corev1.Namespace{}
if err := r.Get(ctx, types.NamespacedName{Name: namespace}, ns); err != nil {
if apierrors.IsNotFound(err) {
r.log.V(5).Info("Namespace does not exist, skipping garbage collection", "namespace", namespace)
return nil
}
return fmt.Errorf("failed to check namespace existence: %w", err)
}
name := utils.PoolResourceQuotaName(pool)
// Attempt to delete the ResourceQuota
target := &corev1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
}
err := r.Get(ctx, types.NamespacedName{Namespace: namespace, Name: target.GetName()}, target)
if err != nil {
if apierrors.IsNotFound(err) {
r.log.V(5).Info("ResourceQuota already deleted", "namespace", namespace, "name", name)
return nil
}
return err
}
// Delete the ResourceQuota
if err := r.Delete(ctx, target); err != nil {
return fmt.Errorf("failed to delete ResourceQuota %s in namespace %s: %w", name, namespace, err)
}
return nil
}
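
Tying the reconciler together, a pool declares the hard budget, the namespace selectors, and the two config toggles consulted above (OrderedQueue, DeleteBoundResources). A hedged sketch, with the YAML field names assumed from the Go spec fields:

apiVersion: capsule.clastix.io/v1beta2
kind: ResourcePool
metadata:
  name: shared-pool
spec:
  config:
    orderedQueue: true           # queue later claims behind an earlier exhausted one
    deleteBoundResources: false  # keep claim objects when they are disassociated
  quota:
    hard:
      requests.cpu: "10"
      requests.memory: 16Gi
  selectors:
    - matchLabels:
        capsule.clastix.io/tenant: solar

For every selected namespace, syncResourceQuota then materializes a ResourceQuota whose hard limits are that namespace's granted claims plus the pool defaults.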

View File

@@ -0,0 +1,16 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package resourcepools
import (
"k8s.io/apimachinery/pkg/api/resource"
)
type PoolExhaustion map[string]PoolExhaustionResource
type PoolExhaustionResource struct {
Namespace bool
Available resource.Quantity
Requesting resource.Quantity
}

View File

@@ -25,6 +25,7 @@ import (
type Manager struct {
client.Client
Metrics *metrics.TenantRecorder
Log logr.Logger
Recorder record.EventRecorder
RESTConfig *rest.Config
@@ -51,8 +52,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
r.Log.Info("Request object not found, could have been deleted after reconcile request") r.Log.Info("Request object not found, could have been deleted after reconcile request")
// If tenant was deleted or cannot be found, clean up metrics // If tenant was deleted or cannot be found, clean up metrics
metrics.TenantResourceUsage.DeletePartialMatch(map[string]string{"tenant": request.Name}) r.Metrics.DeleteTenantMetric(request.Name)
metrics.TenantResourceLimit.DeletePartialMatch(map[string]string{"tenant": request.Name})
return reconcile.Result{}, nil return reconcile.Result{}, nil
} }

View File

@@ -23,7 +23,6 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/metrics"
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
@@ -54,14 +53,13 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
}
// Remove prior metrics, to avoid keeping stale metrics for deleted ResourceQuotas
-metrics.TenantResourceUsage.DeletePartialMatch(map[string]string{"tenant": tenant.Name})
-metrics.TenantResourceLimit.DeletePartialMatch(map[string]string{"tenant": tenant.Name})
+r.Metrics.DeleteTenantMetric(tenant.Name)
// Expose the namespace quota and usage as metrics for the tenant
-metrics.TenantResourceUsage.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(tenant.Status.Size))
+r.Metrics.TenantResourceUsageGauge.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(tenant.Status.Size))
if tenant.Spec.NamespaceOptions != nil && tenant.Spec.NamespaceOptions.Quota != nil {
-metrics.TenantResourceLimit.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(*tenant.Spec.NamespaceOptions.Quota))
+r.Metrics.TenantResourceLimitGauge.WithLabelValues(tenant.Name, "namespaces", "").Set(float64(*tenant.Spec.NamespaceOptions.Quota))
}
//nolint:nestif
@@ -99,6 +97,7 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
return scopeErr
}
// Iterating over all the options declared for the ResourceQuota,
// summing all the used quota across different Namespaces to determine
// if we're hitting a Hard quota at Tenant level.
// if we're hitting a Hard quota at Tenant level. // if we're hitting a Hard quota at Tenant level.
@@ -116,13 +115,13 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
r.Log.Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String()) r.Log.Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String())
// Expose usage and limit metrics for the resource (name) of the ResourceQuota (index) // Expose usage and limit metrics for the resource (name) of the ResourceQuota (index)
metrics.TenantResourceUsage.WithLabelValues( r.Metrics.TenantResourceUsageGauge.WithLabelValues(
tenant.Name, tenant.Name,
name.String(), name.String(),
strconv.Itoa(index), strconv.Itoa(index),
).Set(float64(quantity.MilliValue()) / 1000) ).Set(float64(quantity.MilliValue()) / 1000)
metrics.TenantResourceLimit.WithLabelValues( r.Metrics.TenantResourceLimitGauge.WithLabelValues(
tenant.Name, tenant.Name,
name.String(), name.String(),
strconv.Itoa(index), strconv.Itoa(index),

View File

@@ -16,7 +16,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a Namespace with an additional Role Binding", func() { var _ = Describe("creating a Namespace with an additional Role Binding", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "additional-role-binding", Name: "additional-role-binding",

View File

@@ -16,7 +16,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("enforcing an allowed set of Service external IPs", func() { var _ = Describe("enforcing an allowed set of Service external IPs", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allowed-external-ip", Name: "allowed-external-ip",

View File

@@ -23,7 +23,7 @@ type Patch struct {
Value string `json:"value"`
}
-var _ = Describe("enforcing a Container Registry", func() {
+var _ = Describe("enforcing a Container Registry", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "container-registry",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace as Tenant owner with custom --capsule-group", func() { var _ = Describe("creating a Namespace as Tenant owner with custom --capsule-group", Label("config"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-assigned-custom-group", Name: "tenant-assigned-custom-group",

View File

@@ -21,7 +21,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("when Tenant limits custom Resource Quota", func() { var _ = Describe("when Tenant limits custom Resource Quota", Label("resourcequota"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "limiting-resources", Name: "limiting-resources",
@@ -100,7 +100,7 @@ var _ = Describe("when Tenant limits custom Resource Quota", func() {
dynamicClient := dynamic.NewForConfigOrDie(cfg)
for _, i := range []int{1, 2, 3} {
-ns := NewNamespace(fmt.Sprintf("resource-ns-%d", i))
+ns := NewNamespace(fmt.Sprintf("limiting-resources-ns-%d", i))
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
@@ -122,7 +122,7 @@ var _ = Describe("when Tenant limits custom Resource Quota", func() {
}
for _, i := range []int{1, 2, 3} {
-ns := NewNamespace(fmt.Sprintf("resource-ns-%d", i))
+ns := NewNamespace(fmt.Sprintf("limiting-resources-ns-%d", i))
obj := &unstructured.Unstructured{
Object: map[string]interface{}{

View File

@@ -17,7 +17,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating an ExternalName service when it is disabled for Tenant", func() { var _ = Describe("creating an ExternalName service when it is disabled for Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "disable-external-service", Name: "disable-external-service",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("creating an Ingress with a wildcard when it is denied for the Tenant", func() { var _ = Describe("creating an Ingress with a wildcard when it is denied for the Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "denied-ingress-wildcard", Name: "denied-ingress-wildcard",

View File

@@ -17,7 +17,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a LoadBalancer service when it is disabled for Tenant", func() { var _ = Describe("creating a LoadBalancer service when it is disabled for Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "disable-loadbalancer-service", Name: "disable-loadbalancer-service",

View File

@@ -17,7 +17,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a nodePort service when it is disabled for Tenant", func() { var _ = Describe("creating a nodePort service when it is disabled for Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "disable-node-ports", Name: "disable-node-ports",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("defining dynamic Tenant Owner Cluster Roles", func() { var _ = Describe("defining dynamic Tenant Owner Cluster Roles", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "dynamic-tenant-owner-clusterroles", Name: "dynamic-tenant-owner-clusterroles",

View File

@@ -17,7 +17,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a LoadBalancer service when it is enabled for Tenant", func() { var _ = Describe("creating a LoadBalancer service when it is enabled for Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "enable-loadbalancer-service", Name: "enable-loadbalancer-service",

View File

@@ -15,7 +15,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a nodePort service when it is enabled for Tenant", func() { var _ = Describe("creating a nodePort service when it is enabled for Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "enable-node-ports", Name: "enable-node-ports",

View File

@@ -14,7 +14,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a tenant with various forbidden regexes", func() { var _ = Describe("creating a tenant with various forbidden regexes", Label("tenant"), func() {
//errorRegexes := []string{ //errorRegexes := []string{
// "(.*gitops|.*nsm).[k8s.io/((?!(resource)).*|trusted)](http://k8s.io/((?!(resource)).*%7Ctrusted))", // "(.*gitops|.*nsm).[k8s.io/((?!(resource)).*|trusted)](http://k8s.io/((?!(resource)).*%7Ctrusted))",
//} //}

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace with Tenant name prefix enforcement at Tenant scope", func() { var _ = Describe("creating a Namespace with Tenant name prefix enforcement at Tenant scope", Label("tenant", "config"), func() {
t1 := &capsulev1beta2.Tenant{ t1 := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "awesome", Name: "awesome",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace with Tenant name prefix enforcement", func() { var _ = Describe("creating a Namespace with Tenant name prefix enforcement", Label("tenant"), func() {
t1 := &capsulev1beta2.Tenant{ t1 := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "awesome", Name: "awesome",

View File

@@ -15,7 +15,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("enforcing some defined ImagePullPolicy", func() { var _ = Describe("enforcing some defined ImagePullPolicy", Label("pod"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "image-pull-policies", Name: "image-pull-policies",

View File

@@ -15,7 +15,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("enforcing a defined ImagePullPolicy", func() { var _ = Describe("enforcing a defined ImagePullPolicy", Label("pod"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "image-pull-policy", Name: "image-pull-policy",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when Tenant handles Ingress classes with extensions/v1beta1", func() { var _ = Describe("when Tenant handles Ingress classes with extensions/v1beta1", Label("ingress"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "ingress-class-extensions-v1beta1", Name: "ingress-class-extensions-v1beta1",

View File

@@ -24,7 +24,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when Tenant handles Ingress classes with networking.k8s.io/v1", func() { var _ = Describe("when Tenant handles Ingress classes with networking.k8s.io/v1", Label("ingress"), func() {
tntNoDefault := &capsulev1beta2.Tenant{ tntNoDefault := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "ic-selector-networking-v1", Name: "ic-selector-networking-v1",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when handling Cluster scoped Ingress hostnames collision", func() { var _ = Describe("when handling Cluster scoped Ingress hostnames collision", Label("ingress"), func() {
tnt1 := &capsulev1beta2.Tenant{ tnt1 := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "hostnames-collision-cluster-one", Name: "hostnames-collision-cluster-one",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when disabling Ingress hostnames collision", func() { var _ = Describe("when disabling Ingress hostnames collision", Label("ingress"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "hostnames-collision-disabled", Name: "hostnames-collision-disabled",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when handling Namespace scoped Ingress hostnames collision", func() { var _ = Describe("when handling Namespace scoped Ingress hostnames collision", Label("ingress"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "hostnames-collision-namespace", Name: "hostnames-collision-namespace",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when handling Tenant scoped Ingress hostnames collision", func() { var _ = Describe("when handling Tenant scoped Ingress hostnames collision", Label("ingress"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "hostnames-collision-tenant", Name: "hostnames-collision-tenant",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("when Tenant handles Ingress hostnames", func() { var _ = Describe("when Tenant handles Ingress hostnames", Label("ingress"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "ingress-hostnames", Name: "ingress-hostnames",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace creation with no Tenant assigned", func() { var _ = Describe("creating a Namespace creation with no Tenant assigned", Label("tenant"), func() {
It("should fail", func() { It("should fail", func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
Spec: capsulev1beta2.TenantSpec{ Spec: capsulev1beta2.TenantSpec{

View File

@@ -15,7 +15,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a Namespace for a Tenant with additional metadata", func() { var _ = Describe("creating a Namespace for a Tenant with additional metadata", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-metadata", Name: "tenant-metadata",

View File

@@ -15,7 +15,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating several Namespaces for a Tenant", func() { var _ = Describe("creating several Namespaces for a Tenant", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "capsule-labels", Name: "capsule-labels",

View File

@@ -17,7 +17,7 @@ import (
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
) )
var _ = Describe("creating several Namespaces for a Tenant", func() { var _ = Describe("creating several Namespaces for a Tenant", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "capsule-ns-attack-1", Name: "capsule-ns-attack-1",

View File

@@ -1,5 +1,3 @@
-//go:build e2e
-
 // Copyright 2020-2023 Project Capsule Authors.
 // SPDX-License-Identifier: Apache-2.0
@@ -7,6 +5,7 @@ package e2e
 import (
 "context"
+
 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -16,7 +15,7 @@ import (
 "github.com/projectcapsule/capsule/pkg/api"
 )
-var _ = Describe("creating a Namespace for a Tenant with additional metadata", func() {
+var _ = Describe("creating a Namespace for a Tenant with additional metadata", Label("namespace"), func() {
 tnt := &capsulev1beta2.Tenant{
 ObjectMeta: metav1.ObjectMeta{
 Name: "tenant-metadata",
@@ -92,4 +91,4 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", f
 }, defaultTimeoutInterval, defaultPollInterval).Should(BeTrue())
 })
 })
 })

View File

@@ -17,7 +17,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a Namespace for a Tenant with additional metadata", func() { var _ = Describe("creating a Namespace for a Tenant with additional metadata", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-metadata", Name: "tenant-metadata",
@@ -70,12 +70,12 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", f
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName())) TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
By("checking additional labels", func() { By("checking additional labels", func() {
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed()) Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed())
for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels { for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels {
Expect(ns.Labels).To(HaveKeyWithValue(k, v)) Expect(ns.Labels).To(HaveKeyWithValue(k, v))
} }
return return
}) })
By("checking additional annotations", func() { By("checking additional annotations", func() {
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed()) Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: ns.GetName()}, ns)).Should(Succeed())

View File

@@ -17,7 +17,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a Namespace with user-specified labels and annotations", func() { var _ = Describe("creating a Namespace with user-specified labels and annotations", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-user-metadata-forbidden", Name: "tenant-user-metadata-forbidden",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespaces as different type of Tenant owners", func() { var _ = Describe("creating a Namespaces as different type of Tenant owners", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-assigned", Name: "tenant-assigned",

View File

@@ -18,7 +18,7 @@ import (
"github.com/projectcapsule/capsule/pkg/webhook/utils" "github.com/projectcapsule/capsule/pkg/webhook/utils"
) )
var _ = Describe("modifying node labels and annotations", func() { var _ = Describe("modifying node labels and annotations", Label("config", "nodes"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-node-user-metadata-forbidden", Name: "tenant-node-user-metadata-forbidden",

View File

@@ -14,7 +14,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace in over-quota of three", func() { var _ = Describe("creating a Namespace in over-quota of three", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "over-quota-tenant", Name: "over-quota-tenant",

View File

@@ -19,7 +19,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("when Tenant owner interacts with the webhooks", func() { var _ = Describe("when Tenant owner interacts with the webhooks", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-owner", Name: "tenant-owner",

View File

@@ -16,7 +16,7 @@ import (
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
) )
var _ = Describe("adding metadata to Pod objects", func() { var _ = Describe("adding metadata to Pod objects", Label("pod"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "pod-metadata", Name: "pod-metadata",

View File

@@ -21,7 +21,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("enforcing a Priority Class", func() { var _ = Describe("enforcing a Priority Class", Label("pod"), func() {
tntWithDefaults := &capsulev1beta2.Tenant{ tntWithDefaults := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "priority-class-defaults", Name: "priority-class-defaults",

View File

@@ -19,7 +19,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("enforcing a Runtime Class", func() { var _ = Describe("enforcing a Runtime Class", Label("pod"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "runtime-class", Name: "runtime-class",

View File

@@ -18,7 +18,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("preventing PersistentVolume cross-tenant mount", func() { var _ = Describe("preventing PersistentVolume cross-tenant mount", Label("tenant", "storage"), func() {
tnt1 := &capsulev1beta2.Tenant{ tnt1 := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "pv-one", Name: "pv-one",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace with a protected Namespace regex enabled", func() { var _ = Describe("creating a Namespace with a protected Namespace regex enabled", Label("namespace"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-protected-namespace", Name: "tenant-protected-namespace",

View File

@@ -21,7 +21,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("exceeding a Tenant resource quota", func() { var _ = Describe("exceeding a Tenant resource quota", Label("resourcequota"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-resources-changes", Name: "tenant-resources-changes",

2026
e2e/resourcepool_test.go Normal file


View File

@@ -0,0 +1,667 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package e2e
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/meta"
)
var _ = Describe("ResourcePoolClaim Tests", Label("resourcepool"), func() {
_ = &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "test-claims-1",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
},
},
Spec: capsulev1beta2.TenantSpec{
Owners: capsulev1beta2.OwnerListSpec{
{
Name: "wind-user",
Kind: "User",
},
},
},
}
JustAfterEach(func() {
Eventually(func() error {
poolList := &capsulev1beta2.TenantList{}
labelSelector := client.MatchingLabels{"e2e-resourcepoolclaims": "test"}
if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil {
return err
}
for _, pool := range poolList.Items {
if err := k8sClient.Delete(context.TODO(), &pool); err != nil {
return err
}
}
return nil
}, "30s", "5s").Should(Succeed())
Eventually(func() error {
poolList := &capsulev1beta2.ResourcePoolList{}
labelSelector := client.MatchingLabels{"e2e-resourcepoolclaims": "test"}
if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil {
return err
}
for _, pool := range poolList.Items {
if err := k8sClient.Delete(context.TODO(), &pool); err != nil {
return err
}
}
return nil
}, "30s", "5s").Should(Succeed())
Eventually(func() error {
poolList := &corev1.NamespaceList{}
labelSelector := client.MatchingLabels{"e2e-resourcepoolclaims": "test"}
if err := k8sClient.List(context.TODO(), poolList, labelSelector); err != nil {
return err
}
for _, pool := range poolList.Items {
if err := k8sClient.Delete(context.TODO(), &pool); err != nil {
return err
}
}
return nil
}, "30s", "5s").Should(Succeed())
})
It("Claim to Pool Assignment", func() {
pool1 := &capsulev1beta2.ResourcePool{
ObjectMeta: metav1.ObjectMeta{
Name: "test-binding-claims",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
},
},
Spec: capsulev1beta2.ResourcePoolSpec{
Selectors: []api.NamespaceSelector{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"capsule.clastix.io/tenant": "claims-bindings",
},
},
},
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"capsule.clastix.io/tenant": "claims-bindings-2",
},
},
},
},
Quota: corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("2"),
corev1.ResourceRequestsMemory: resource.MustParse("2Gi"),
},
},
},
}
claim1 := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "assign-pool-claim-1",
Namespace: "ns-1-pool-assign",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
Pool: "test-binding-claims",
ResourceClaims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("0"),
corev1.ResourceLimitsMemory: resource.MustParse("0"),
corev1.ResourceRequestsCPU: resource.MustParse("0"),
corev1.ResourceRequestsMemory: resource.MustParse("0"),
},
},
}
claim2 := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "assign-pool-claim-2",
Namespace: "ns-2-pool-assign",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
Pool: "test-binding-claims",
ResourceClaims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("0"),
corev1.ResourceLimitsMemory: resource.MustParse("0"),
corev1.ResourceRequestsCPU: resource.MustParse("0"),
corev1.ResourceRequestsMemory: resource.MustParse("0"),
},
},
}
By("Create the ResourcePool", func() {
err := k8sClient.Create(context.TODO(), pool1)
Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool1)
})
By("Get Applied revision", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool1.Name}, pool1)
Expect(err).Should(Succeed())
})
By("Create Namespaces, which are selected by the pool", func() {
ns1 := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "ns-1-pool-assign",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "claims-bindings",
},
},
}
err := k8sClient.Create(context.TODO(), ns1)
Expect(err).Should(Succeed())
ns2 := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "ns-2-pool-assign",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "claims-bindings-2",
},
},
}
err = k8sClient.Create(context.TODO(), ns2)
Expect(err).Should(Succeed())
ns3 := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "ns-3-pool-assign",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "something-else",
},
},
}
err = k8sClient.Create(context.TODO(), ns3)
Expect(err).Should(Succeed())
})
By("Verify Namespaces are shown as allowed targets", func() {
expectedNamespaces := []string{"ns-1-pool-assign", "ns-2-pool-assign"}
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool1.Name}, pool1)
Expect(err).Should(Succeed())
Expect(pool1.Status.Namespaces).To(Equal(expectedNamespaces))
Expect(pool1.Status.NamespaceSize).To(Equal(uint(2)))
})
By("Create a first claim and verify binding", func() {
err := k8sClient.Create(context.TODO(), claim1)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim1)
err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim1.Name, Namespace: claim1.Namespace}, claim1)
Expect(err).Should(Succeed())
isSuccessfullyBoundToPool(pool1, claim1)
expectedPool := api.StatusNameUID{
Name: api.Name(pool1.Name),
UID: pool1.GetUID(),
}
Expect(claim1.Status.Pool).To(Equal(expectedPool), "expected pool name to match")
Expect(claim1.Status.Condition.Status).To(Equal(metav1.ConditionTrue), "failed to verify condition status")
Expect(claim1.Status.Condition.Type).To(Equal(meta.BoundCondition), "failed to verify condition type")
Expect(claim1.Status.Condition.Reason).To(Equal(meta.SucceededReason), "failed to verify condition reason")
})
By("Create a second claim and verify binding", func() {
err := k8sClient.Create(context.TODO(), claim2)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim2)
err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim2.Name, Namespace: claim2.Namespace}, claim2)
Expect(err).Should(Succeed())
isSuccessfullyBoundToPool(pool1, claim2)
expectedPool := api.StatusNameUID{
Name: api.Name(pool1.Name),
UID: pool1.GetUID(),
}
Expect(claim2.Status.Pool).To(Equal(expectedPool), "expected pool name to match")
Expect(claim2.Status.Condition.Status).To(Equal(metav1.ConditionTrue), "failed to verify condition status")
Expect(claim2.Status.Condition.Type).To(Equal(meta.BoundCondition), "failed to verify condition type")
Expect(claim2.Status.Condition.Reason).To(Equal(meta.SucceededReason), "failed to verify condition reason")
})
By("Create a third claim and verify error", func() {
claim := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "assign-pool-claim-3",
Namespace: "ns-3-pool-assign",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
Pool: "test-binding-claims",
ResourceClaims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("0"),
corev1.ResourceLimitsMemory: resource.MustParse("0"),
corev1.ResourceRequestsCPU: resource.MustParse("0"),
corev1.ResourceRequestsMemory: resource.MustParse("0"),
},
},
}
err := k8sClient.Create(context.TODO(), claim)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim)
err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
expectedPool := api.StatusNameUID{}
Expect(claim.Status.Pool).To(Equal(expectedPool), "expected pool name to be empty")
Expect(claim.Status.Condition.Status).To(Equal(metav1.ConditionFalse), "failed to verify condition status")
Expect(claim.Status.Condition.Type).To(Equal(meta.AssignedCondition), "failed to verify condition type")
Expect(claim.Status.Condition.Reason).To(Equal(meta.FailedReason), "failed to verify condition reason")
})
})
It("Admission (Validation) - Patch Guard", func() {
pool := &capsulev1beta2.ResourcePool{
ObjectMeta: metav1.ObjectMeta{
Name: "test-admission-claims",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
},
},
Spec: capsulev1beta2.ResourcePoolSpec{
Config: capsulev1beta2.ResourcePoolSpecConfiguration{
DeleteBoundResources: ptr.To(false),
},
Selectors: []api.NamespaceSelector{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"capsule.clastix.io/tenant": "admission-guards",
},
},
},
},
Quota: corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("2"),
corev1.ResourceRequestsMemory: resource.MustParse("2Gi"),
},
},
},
}
claim := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "admission-pool-claim-1",
Namespace: "ns-1-pool-admission",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
Pool: pool.GetName(),
ResourceClaims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
corev1.ResourceLimitsMemory: resource.MustParse("1Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("1"),
corev1.ResourceRequestsMemory: resource.MustParse("1Gi"),
},
},
}
By("Create the Claim", func() {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: claim.Namespace,
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "admission-guards",
},
},
}
err := k8sClient.Create(context.TODO(), ns)
Expect(err).Should(Succeed())
err = k8sClient.Create(context.TODO(), claim)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim)
})
By("Create the ResourcePool", func() {
err := k8sClient.Create(context.TODO(), pool)
Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool)
})
By("Get Applied revision", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: pool.Name}, pool)
Expect(err).Should(Succeed())
})
By("Bind a claim", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
expectedPool := api.StatusNameUID{
Name: api.Name(pool.Name),
UID: pool.GetUID(),
}
isBoundCondition(claim)
Expect(claim.Status.Pool).To(Equal(expectedPool), "expected pool name to match")
})
By("Error on patching resources for claim (Increase)", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
claim.Spec.ResourceClaims = corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("2"),
corev1.ResourceRequestsMemory: resource.MustParse("2Gi"),
}
err = k8sClient.Update(context.TODO(), claim)
Expect(err).ShouldNot(Succeed(), "Expected error when updating resources in bound state %s", claim)
})
By("Error on patching resources for claim (Decrease)", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
claim.Spec.ResourceClaims = corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("0"),
corev1.ResourceLimitsMemory: resource.MustParse("0Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("0"),
corev1.ResourceRequestsMemory: resource.MustParse("0Gi"),
}
err = k8sClient.Update(context.TODO(), claim)
Expect(err).ShouldNot(Succeed(), "Expected error when updating resources in bound state %s", claim)
})
By("Error on patching pool name", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
claim.Spec.Pool = "some-random-pool"
err = k8sClient.Update(context.TODO(), claim)
Expect(err).ShouldNot(Succeed(), "Expected error when updating resources in bound state %s", claim)
})
By("Delete Pool", func() {
err := k8sClient.Delete(context.TODO(), pool)
Expect(err).Should(Succeed())
})
By("Verify claim is no longer bound", func() {
isUnassignedCondition(claim)
})
By("Allow patching resources for claim (Increase)", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
claim.Spec.ResourceClaims = corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("2"),
corev1.ResourceRequestsMemory: resource.MustParse("2Gi"),
}
err = k8sClient.Update(context.TODO(), claim)
Expect(err).Should(Succeed(), "Expected error when updating resources in bound state %s", claim)
})
By("Allow patching resources for claim (Decrease)", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
claim.Spec.ResourceClaims = corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("0"),
corev1.ResourceLimitsMemory: resource.MustParse("0Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("0"),
corev1.ResourceRequestsMemory: resource.MustParse("0Gi"),
}
err = k8sClient.Update(context.TODO(), claim)
Expect(err).Should(Succeed(), "Expected error when updating resources in bound state %s", claim)
})
By("Allow patching pool name", func() {
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
claim.Spec.Pool = "some-random-pool"
err = k8sClient.Update(context.TODO(), claim)
Expect(err).Should(Succeed(), "Expected no error when updating resources in bound state %s", claim)
})
})
It("Admission (Mutation) - Auto Pool Assign", func() {
pool1 := &capsulev1beta2.ResourcePool{
ObjectMeta: metav1.ObjectMeta{
Name: "test-auto-assign-1",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
},
},
Spec: capsulev1beta2.ResourcePoolSpec{
Config: capsulev1beta2.ResourcePoolSpecConfiguration{
DeleteBoundResources: ptr.To(false),
},
Selectors: []api.NamespaceSelector{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"capsule.clastix.io/tenant": "admission-auto-assign",
},
},
},
},
Quota: corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
corev1.ResourceRequestsCPU: resource.MustParse("2"),
},
},
},
}
pool2 := &capsulev1beta2.ResourcePool{
ObjectMeta: metav1.ObjectMeta{
Name: "test-auto-assign-2",
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
},
},
Spec: capsulev1beta2.ResourcePoolSpec{
Config: capsulev1beta2.ResourcePoolSpecConfiguration{
DeleteBoundResources: ptr.To(false),
},
Selectors: []api.NamespaceSelector{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"capsule.clastix.io/tenant": "admission-auto-assign",
},
},
},
},
Quota: corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("2"),
corev1.ResourceRequestsMemory: resource.MustParse("2"),
},
},
},
}
By("Create the ResourcePools", func() {
err := k8sClient.Create(context.TODO(), pool1)
Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool1)
err = k8sClient.Create(context.TODO(), pool2)
Expect(err).Should(Succeed(), "Failed to create ResourcePool %s", pool2)
})
By("Auto Assign Claim (CPU)", func() {
claim := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "auto-assign-1",
Namespace: "ns-1-pool-assign",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
ResourceClaims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
corev1.ResourceRequestsCPU: resource.MustParse("1"),
},
},
}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: claim.Namespace,
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "admission-auto-assign",
},
},
}
err := k8sClient.Create(context.TODO(), ns)
Expect(err).Should(Succeed())
err = k8sClient.Create(context.TODO(), claim)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim)
err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
Expect(claim.Spec.Pool).To(Equal(pool1.Name), "expected pool name to match")
})
By("Auto Assign Claim (Memory)", func() {
claim := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "auto-assign-1",
Namespace: "ns-2-pool-assign",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
ResourceClaims: corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("1"),
corev1.ResourceRequestsMemory: resource.MustParse("1"),
},
},
}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: claim.Namespace,
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "admission-auto-assign",
},
},
}
err := k8sClient.Create(context.TODO(), ns)
Expect(err).Should(Succeed())
err = k8sClient.Create(context.TODO(), claim)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim)
err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
Expect(claim.Spec.Pool).To(Equal(pool2.Name), "expected pool name to match")
})
By("No Default available (Storage)", func() {
claim := &capsulev1beta2.ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "auto-assign-3",
Namespace: "ns-3-pool-assign",
},
Spec: capsulev1beta2.ResourcePoolClaimSpec{
ResourceClaims: corev1.ResourceList{
corev1.ResourceRequestsStorage: resource.MustParse("1"),
},
},
}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: claim.Namespace,
Labels: map[string]string{
"e2e-resourcepoolclaims": "test",
"capsule.clastix.io/tenant": "admission-auto-assign",
},
},
}
err := k8sClient.Create(context.TODO(), ns)
Expect(err).Should(Succeed())
err = k8sClient.Create(context.TODO(), claim)
Expect(err).Should(Succeed(), "Failed to create Claim %s", claim)
err = k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, claim)
Expect(err).Should(Succeed())
Expect(claim.Spec.Pool).To(Equal(""), "expected pool name to match")
})
})
})
func isUnassignedCondition(claim *capsulev1beta2.ResourcePoolClaim) {
cl := &capsulev1beta2.ResourcePoolClaim{}
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, cl)
Expect(err).Should(Succeed())
Expect(cl.Status.Condition.Status).To(Equal(metav1.ConditionFalse), "failed to verify condition status")
Expect(cl.Status.Condition.Type).To(Equal(meta.AssignedCondition), "failed to verify condition type")
Expect(cl.Status.Condition.Reason).To(Equal(meta.FailedReason), "failed to verify condition reason")
}
func isBoundCondition(claim *capsulev1beta2.ResourcePoolClaim) {
cl := &capsulev1beta2.ResourcePoolClaim{}
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, cl)
Expect(err).Should(Succeed())
Expect(cl.Status.Condition.Status).To(Equal(metav1.ConditionTrue), "failed to verify condition status")
Expect(cl.Status.Condition.Type).To(Equal(meta.BoundCondition), "failed to verify condition type")
Expect(cl.Status.Condition.Reason).To(Equal(meta.SucceededReason), "failed to verify condition reason")
}

View File

@@ -20,7 +20,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("trying to escalate from a Tenant Namespace ServiceAccount", func() { var _ = Describe("trying to escalate from a Tenant Namespace ServiceAccount", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "sa-privilege-escalation", Name: "sa-privilege-escalation",

View File

@@ -16,7 +16,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace trying to select a third Tenant", func() { var _ = Describe("creating a Namespace trying to select a third Tenant", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-non-owned", Name: "tenant-non-owned",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace without a Tenant selector when user owns multiple Tenants", func() { var _ = Describe("creating a Namespace without a Tenant selector when user owns multiple Tenants", Label("tenant"), func() {
t1 := &capsulev1beta2.Tenant{ t1 := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-one", Name: "tenant-one",

View File

@@ -15,7 +15,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Namespace with Tenant selector when user owns multiple tenants", func() { var _ = Describe("creating a Namespace with Tenant selector when user owns multiple tenants", Label("tenant"), func() {
t1 := &capsulev1beta2.Tenant{ t1 := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-one", Name: "tenant-one",

View File

@@ -16,7 +16,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("creating a Service with user-specified labels and annotations", func() { var _ = Describe("creating a Service with user-specified labels and annotations", Label("tenant", "service"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-user-metadata-forbidden", Name: "tenant-user-metadata-forbidden",

View File

@@ -23,7 +23,7 @@ import (
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
) )
var _ = Describe("adding metadata to Service objects", func() { var _ = Describe("adding metadata to Service objects", Label("tenant", "service"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "service-metadata", Name: "service-metadata",

View File

@@ -26,7 +26,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("when Tenant handles Storage classes", func() { var _ = Describe("when Tenant handles Storage classes", Label("tenant", "storage"), func() {
tntNoDefaults := &capsulev1beta2.Tenant{ tntNoDefaults := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "storage-class-selector", Name: "storage-class-selector",

View File

@@ -5,9 +5,10 @@ package e2e
 import (
 "context"
-"github.com/projectcapsule/capsule/pkg/utils"
 "time"
+
+"github.com/projectcapsule/capsule/pkg/utils"
 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"
 corev1 "k8s.io/api/core/v1"
@@ -17,7 +18,7 @@ import (
 capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
 )
-var _ = Describe("cordoning a Tenant", func() {
+var _ = Describe("cordoning a Tenant", Label("tenant"), func() {
 tnt := &capsulev1beta2.Tenant{
 ObjectMeta: metav1.ObjectMeta{
 Name: "tenant-cordoning",

View File

@@ -23,7 +23,7 @@ func getLabels(tnt capsulev1beta2.Tenant) (map[string]string, error) {
 return current.GetLabels(), nil
 }
-var _ = Describe("adding metadata to a Tenant", func() {
+var _ = Describe("adding metadata to a Tenant", Label("tenant"), func() {
 tnt := &capsulev1beta2.Tenant{
 ObjectMeta: metav1.ObjectMeta{
 Name: "tenant-metadata",

View File

@@ -13,7 +13,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating a Tenant with wrong name", func() { var _ = Describe("creating a Tenant with wrong name", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "non_rfc_dns_1123", Name: "non_rfc_dns_1123",

View File

@@ -14,7 +14,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("Deleting a tenant with protected annotation", func() { var _ = Describe("Deleting a tenant with protected annotation", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "protected-tenant", Name: "protected-tenant",

View File

@@ -21,7 +21,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("changing Tenant managed Kubernetes resources", func() { var _ = Describe("changing Tenant managed Kubernetes resources", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-resources-changes", Name: "tenant-resources-changes",

View File

@@ -21,7 +21,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
) )
var _ = Describe("creating namespaces within a Tenant with resources", func() { var _ = Describe("creating namespaces within a Tenant with resources", Label("tenant"), func() {
tnt := &capsulev1beta2.Tenant{ tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "tenant-resources", Name: "tenant-resources",

View File

@@ -24,7 +24,7 @@ import (
"github.com/projectcapsule/capsule/pkg/api" "github.com/projectcapsule/capsule/pkg/api"
) )
var _ = Describe("Creating a TenantResource object", func() { var _ = Describe("Creating a TenantResource object", Label("tenantresource"), func() {
solar := &capsulev1beta2.Tenant{ solar := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "energy-solar", Name: "energy-solar",

View File

@@ -6,6 +6,7 @@ package e2e
 import (
 "context"
 "fmt"
+"reflect"
 "strings"
 "time"
@@ -13,6 +14,7 @@ import (
 "sigs.k8s.io/controller-runtime/pkg/client"
 . "github.com/onsi/gomega"
+"github.com/stretchr/testify/assert"
 corev1 "k8s.io/api/core/v1"
 rbacv1 "k8s.io/api/rbac/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -172,3 +174,57 @@ func GetKubernetesVersion() *versionUtil.Version {
 return ver
 }
func DeepCompare(expected, actual interface{}) (bool, string) {
expVal := reflect.ValueOf(expected)
actVal := reflect.ValueOf(actual)
// If the kinds differ, they are not equal.
if expVal.Kind() != actVal.Kind() {
return false, fmt.Sprintf("kind mismatch: %v vs %v", expVal.Kind(), actVal.Kind())
}
switch expVal.Kind() {
case reflect.Slice, reflect.Array:
// Convert slices to []interface{} for ElementsMatch.
expSlice := make([]interface{}, expVal.Len())
actSlice := make([]interface{}, actVal.Len())
for i := 0; i < expVal.Len(); i++ {
expSlice[i] = expVal.Index(i).Interface()
}
for i := 0; i < actVal.Len(); i++ {
actSlice[i] = actVal.Index(i).Interface()
}
// Use a dummy tester to capture error messages.
dummy := &dummyT{}
if !assert.ElementsMatch(dummy, expSlice, actSlice) {
return false, fmt.Sprintf("slice mismatch: %v", dummy.errors)
}
return true, ""
case reflect.Struct:
// Iterate over fields and compare recursively.
for i := 0; i < expVal.NumField(); i++ {
fieldName := expVal.Type().Field(i).Name
ok, msg := DeepCompare(expVal.Field(i).Interface(), actVal.Field(i).Interface())
if !ok {
return false, fmt.Sprintf("field %s mismatch: %s", fieldName, msg)
}
}
return true, ""
default:
// Fallback to reflect.DeepEqual for other types.
if !reflect.DeepEqual(expected, actual) {
return false, fmt.Sprintf("expected %v but got %v", expected, actual)
}
return true, ""
}
}
// dummyT implements a minimal TestingT for testify.
type dummyT struct {
errors []string
}
func (d *dummyT) Errorf(format string, args ...interface{}) {
d.errors = append(d.errors, fmt.Sprintf(format, args...))
}
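
A quick usage sketch (not part of this diff, assuming it runs inside the same e2e package): unlike reflect.DeepEqual, slices are matched order-insensitively through assert.ElementsMatch.

func ExampleDeepCompare() {
	// Same elements in a different order still compare as equal.
	ok, _ := DeepCompare([]string{"ns-1", "ns-2"}, []string{"ns-2", "ns-1"})
	fmt.Println(ok)
	// Output: true
}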

49
pkg/api/selectors.go Normal file
View File

@@ -0,0 +1,49 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package api
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Selector for resources and their labels or selecting origin namespaces
// +kubebuilder:object:generate=true
type NamespaceSelector struct {
// Select items based on their labels. If a namespace selector is also set, the label selector
// is applied to items within the selected namespaces; otherwise it applies to all items.
*metav1.LabelSelector `json:",inline"`
}
// GetMatchingNamespaces retrieves the list of namespaces that match the NamespaceSelector.
func (s *NamespaceSelector) GetMatchingNamespaces(ctx context.Context, client client.Client) ([]corev1.Namespace, error) {
if s.LabelSelector == nil {
return nil, nil // No namespace selector means all namespaces
}
nsSelector, err := metav1.LabelSelectorAsSelector(s.LabelSelector)
if err != nil {
return nil, fmt.Errorf("invalid namespace selector: %w", err)
}
namespaceList := &corev1.NamespaceList{}
if err := client.List(ctx, namespaceList); err != nil {
return nil, fmt.Errorf("failed to list namespaces: %w", err)
}
var matchingNamespaces []corev1.Namespace
for _, ns := range namespaceList.Items {
if nsSelector.Matches(labels.Set(ns.Labels)) {
matchingNamespaces = append(matchingNamespaces, ns)
}
}
return matchingNamespaces, nil
}
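
For orientation, a minimal caller sketch; the function name and the tenant label value below are invented for illustration:

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/projectcapsule/capsule/pkg/api"
)

// resolvePoolNamespaces builds a selector and lets it list and filter
// namespaces through the controller-runtime client in one call.
func resolvePoolNamespaces(ctx context.Context, c client.Client) ([]corev1.Namespace, error) {
	sel := api.NamespaceSelector{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"capsule.clastix.io/tenant": "solar"},
		},
	}

	return sel.GetMatchingNamespaces(ctx, c)
}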

32
pkg/api/status.go Normal file
View File

@@ -0,0 +1,32 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package api
import (
k8stypes "k8s.io/apimachinery/pkg/types"
)
// Name must be unique within a namespace. Is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names
// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:object:generate=true
type Name string
func (n Name) String() string {
return string(n)
}
type StatusNameUID struct {
// Name of the referenced object
Name Name `json:"name,omitempty"`
// Namespace of the referenced object
Namespace Name `json:"namespace,omitempty"`
// UID of the tracked resource to pinpoint tracking
k8stypes.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid"`
}

View File

@@ -225,6 +225,26 @@ func (in *LimitRangesSpec) DeepCopy() *LimitRangesSpec {
 return out
 }
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) {
+*out = *in
+if in.LabelSelector != nil {
+in, out := &in.LabelSelector, &out.LabelSelector
+*out = new(v1.LabelSelector)
+(*in).DeepCopyInto(*out)
+}
+}
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector.
+func (in *NamespaceSelector) DeepCopy() *NamespaceSelector {
+if in == nil {
+return nil
+}
+out := new(NamespaceSelector)
+in.DeepCopyInto(out)
+return out
+}
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
 *out = *in

View File

@@ -17,6 +17,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2" capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/indexer/ingress" "github.com/projectcapsule/capsule/pkg/indexer/ingress"
"github.com/projectcapsule/capsule/pkg/indexer/namespace" "github.com/projectcapsule/capsule/pkg/indexer/namespace"
"github.com/projectcapsule/capsule/pkg/indexer/resourcepool"
"github.com/projectcapsule/capsule/pkg/indexer/tenant" "github.com/projectcapsule/capsule/pkg/indexer/tenant"
"github.com/projectcapsule/capsule/pkg/indexer/tenantresource" "github.com/projectcapsule/capsule/pkg/indexer/tenantresource"
"github.com/projectcapsule/capsule/pkg/utils" "github.com/projectcapsule/capsule/pkg/utils"
@@ -31,6 +32,8 @@ type CustomIndexer interface {
func AddToManager(ctx context.Context, log logr.Logger, mgr manager.Manager) error { func AddToManager(ctx context.Context, log logr.Logger, mgr manager.Manager) error {
indexers := []CustomIndexer{ indexers := []CustomIndexer{
tenant.NamespacesReference{Obj: &capsulev1beta2.Tenant{}}, tenant.NamespacesReference{Obj: &capsulev1beta2.Tenant{}},
resourcepool.NamespacesReference{Obj: &capsulev1beta2.ResourcePool{}},
resourcepool.PoolUIDReference{Obj: &capsulev1beta2.ResourcePoolClaim{}},
tenant.OwnerReference{}, tenant.OwnerReference{},
namespace.OwnerReference{}, namespace.OwnerReference{},
ingress.HostnamePath{Obj: &extensionsv1beta1.Ingress{}}, ingress.HostnamePath{Obj: &extensionsv1beta1.Ingress{}},

View File

@@ -0,0 +1,33 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package resourcepool
import (
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
)
type PoolUIDReference struct {
Obj client.Object
}
func (o PoolUIDReference) Object() client.Object {
return o.Obj
}
func (o PoolUIDReference) Field() string {
return ".status.pool.uid"
}
func (o PoolUIDReference) Func() client.IndexerFunc {
return func(object client.Object) []string {
grq, ok := object.(*capsulev1beta2.ResourcePoolClaim)
if !ok {
return nil
}
return []string{string(grq.Status.Pool.UID)}
}
}
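
Once this index is registered via AddToManager, a controller can fetch every claim bound to a given pool with a field selector instead of scanning all claims. A hypothetical helper (claimsBoundToPool is invented for illustration):

package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
)

// claimsBoundToPool lists claims whose status references the pool,
// using the ".status.pool.uid" field index registered by PoolUIDReference.
func claimsBoundToPool(ctx context.Context, c client.Client, pool *capsulev1beta2.ResourcePool) (*capsulev1beta2.ResourcePoolClaimList, error) {
	claims := &capsulev1beta2.ResourcePoolClaimList{}
	err := c.List(ctx, claims, client.MatchingFields{
		".status.pool.uid": string(pool.GetUID()),
	})

	return claims, err
}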

View File

@@ -0,0 +1,34 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package resourcepool
import (
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
)
// NamespacesReference defines the indexer logic for ResourcePool namespaces.
type NamespacesReference struct {
Obj client.Object
}
func (o NamespacesReference) Object() client.Object {
return o.Obj
}
func (o NamespacesReference) Field() string {
return ".status.namespaces"
}
func (o NamespacesReference) Func() client.IndexerFunc {
return func(object client.Object) []string {
rp, ok := object.(*capsulev1beta2.ResourcePool)
if !ok {
return nil
}
return rp.Status.Namespaces
}
}

45
pkg/meta/annotations.go Normal file
View File

@@ -0,0 +1,45 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package meta
import (
"strings"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
ReleaseAnnotation = "projectcapsule.dev/release"
ReleaseAnnotationTrigger = "true"
)
func ReleaseAnnotationTriggers(obj client.Object) bool {
return annotationTriggers(obj, ReleaseAnnotation, ReleaseAnnotationTrigger)
}
func ReleaseAnnotationRemove(obj client.Object) {
annotationRemove(obj, ReleaseAnnotation)
}
func annotationRemove(obj client.Object, anno string) {
annotations := obj.GetAnnotations()
if _, ok := annotations[anno]; ok {
delete(annotations, anno)
obj.SetAnnotations(annotations)
}
}
func annotationTriggers(obj client.Object, anno string, trigger string) bool {
annotations := obj.GetAnnotations()
if val, ok := annotations[anno]; ok {
if strings.ToLower(val) == trigger {
return true
}
}
return false
}
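
A sketch of the intended trigger-and-clear pattern in a reconciler (handleRelease is invented for illustration):

package sketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/projectcapsule/capsule/pkg/meta"
)

// handleRelease acts once on the release annotation and strips it,
// so the trigger does not fire again on the next reconciliation.
func handleRelease(ctx context.Context, c client.Client, obj client.Object) (ctrl.Result, error) {
	if !meta.ReleaseAnnotationTriggers(obj) {
		return ctrl.Result{}, nil
	}

	// ... release-specific handling would go here ...

	meta.ReleaseAnnotationRemove(obj)

	return ctrl.Result{}, c.Update(ctx, obj)
}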

41
pkg/meta/conditions.go Normal file
View File

@@ -0,0 +1,41 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package meta
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
// ReadyCondition indicates the resource is ready and fully reconciled.
ReadyCondition string = "Ready"
NotReadyCondition string = "NotReady"
AssignedCondition string = "Assigned"
BoundCondition string = "Bound"
// SucceededReason indicates a condition or event observed a success.
SucceededReason string = "Succeeded"
// FailedReason indicates a condition or event observed a failure (Claim Rejected).
FailedReason string = "Failed"
PoolExhaustedReason string = "PoolExhausted"
QueueExhaustedReason string = "QueueExhausted"
NamespaceExhaustedReason string = "NamespaceExhausted"
)
func NewBoundCondition(obj client.Object) metav1.Condition {
return metav1.Condition{
Type: BoundCondition,
ObservedGeneration: obj.GetGeneration(),
LastTransitionTime: metav1.Now(),
}
}
func NewAssignedCondition(obj client.Object) metav1.Condition {
return metav1.Condition{
Type: AssignedCondition,
ObservedGeneration: obj.GetGeneration(),
LastTransitionTime: metav1.Now(),
}
}

8
pkg/meta/finalizers.go Normal file
View File

@@ -0,0 +1,8 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package meta
const (
ControllerFinalizer = "controller.projectcapsule.dev/finalize"
)

45
pkg/meta/labels.go Normal file
View File

@@ -0,0 +1,45 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package meta
import (
"strings"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
FreezeLabel = "projectcapsule.dev/freeze"
FreezeLabelTrigger = "true"
)
func FreezeLabelTriggers(obj client.Object) bool {
return labelTriggers(obj, FreezeLabel, FreezeLabelTrigger)
}
func FreezeLabelRemove(obj client.Object) {
labelRemove(obj, FreezeLabel)
}
func labelRemove(obj client.Object, label string) {
labels := obj.GetLabels()
if _, ok := labels[label]; ok {
delete(labels, label)
obj.SetLabels(labels)
}
}
func labelTriggers(obj client.Object, label string, trigger string) bool {
labels := obj.GetLabels()
if val, ok := labels[label]; ok {
if strings.ToLower(val) == trigger {
return true
}
}
return false
}

View File

@@ -0,0 +1,69 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package meta
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// SetLooseOwnerReference adds an owner reference which does not cause the object to be deleted when the owner is deleted.
func SetLooseOwnerReference(
obj client.Object,
owner client.Object,
schema *runtime.Scheme,
) (err error) {
err = controllerutil.SetOwnerReference(owner, obj, schema)
if err != nil {
return err
}
ownerRefs := obj.GetOwnerReferences()
for i, ownerRef := range ownerRefs {
if ownerRef.UID == owner.GetUID() {
if ownerRef.BlockOwnerDeletion != nil || ownerRef.Controller != nil {
ownerRefs[i].BlockOwnerDeletion = nil
ownerRefs[i].Controller = nil
}
break
}
}
return nil
}
// RemoveLooseOwnerReference removes a loose owner reference, matching by UID.
func RemoveLooseOwnerReference(
obj client.Object,
owner client.Object,
) {
refs := []metav1.OwnerReference{}
for _, ownerRef := range obj.GetOwnerReferences() {
if ownerRef.UID == owner.GetUID() {
continue
}
refs = append(refs, ownerRef)
}
obj.SetOwnerReferences(refs)
}
// HasLooseOwnerReference returns true if the object has an owner reference matching the owner's UID; otherwise it returns false.
func HasLooseOwnerReference(
obj client.Object,
owner client.Object,
) bool {
for _, ownerRef := range obj.GetOwnerReferences() {
if ownerRef.UID == owner.GetUID() {
return true
}
}
return false
}
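
A short usage sketch (trackClaim is invented for illustration): record one object as a loose owner of another, using the reverse check to avoid duplicate references:

package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/projectcapsule/capsule/pkg/meta"
)

// trackClaim records the pool as a loose owner of the claim: no controller
// flag, no blocked owner deletion, and no double-adding of the reference.
func trackClaim(pool, claim client.Object, scheme *runtime.Scheme) error {
	if meta.HasLooseOwnerReference(claim, pool) {
		return nil
	}

	return meta.SetLooseOwnerReference(claim, pool, scheme)
}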

Some files were not shown because too many files have changed in this diff