Fix conflicts

Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
This commit is contained in:
faizanahmad055
2026-05-11 10:55:54 +02:00
89 changed files with 16907 additions and 8021 deletions

283
scripts/e2e-cluster-cleanup.sh Executable file
View File

@@ -0,0 +1,283 @@
#!/bin/bash
# Cleanup script for e2e test cluster
# Run this after e2e tests complete: ./scripts/e2e-cluster-cleanup.sh
#
# This removes:
# - Reloader test resources (namespaces, cluster roles, etc.)
# - Vault and its namespace
# - CSI Secrets Store Driver
# - Argo Rollouts
#
# Resources are removed in reverse dependency order.
set -euo pipefail
# =============================================================================
# Configuration
# =============================================================================
# Versions may be overridden via environment variables; namespaces are fixed.
# All are marked readonly so a later accidental assignment fails loudly.
ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}"
readonly ARGO_ROLLOUTS_VERSION
readonly ARGO_ROLLOUTS_NAMESPACE="argo-rollouts"
CSI_DRIVER_VERSION="${CSI_DRIVER_VERSION:-1.5.5}"
readonly CSI_DRIVER_VERSION
readonly CSI_NAMESPACE="kube-system"
readonly VAULT_NAMESPACE="vault"
# =============================================================================
# Helper Functions
# =============================================================================
# Print a blank line followed by a "=== title ===" section banner.
log_header() {
  printf '\n=== %s ===\n' "$1"
}
# Print an informational message to stdout.
# printf is used instead of echo so arguments that look like echo flags
# (e.g. "-n", "-e") are printed literally instead of being swallowed.
log_info() {
  printf '%s\n' "$1"
}
# Print a success message to stdout.
log_success() {
  printf '%s\n' "$1"
}
# Print a warning message to stdout.
log_warning() {
  printf '%s\n' "$1"
}
# Print an error message to stderr (keeps stdout clean for data/pipelines).
log_error() {
  printf '%s\n' "$1" >&2
}
# Return 0 when the named executable is available on PATH; otherwise log
# an error to stderr and return 1.
check_command() {
  if command -v "$1" > /dev/null 2>&1; then
    return 0
  fi
  log_error "$1 is not installed or not in PATH"
  return 1
}
# Delete Kubernetes resources while tolerating "not found" and any other
# kubectl failure, so best-effort cleanup keeps going under `set -e`.
safe_delete() {
  if ! kubectl delete "$@" --ignore-not-found 2> /dev/null; then
    : # best-effort: ignore deletion failures
  fi
}
# =============================================================================
# Dependency Checks
# =============================================================================
# Verify the tools needed for cleanup are present; kubectl is the only
# hard requirement. Exits the script when it is missing.
check_dependencies() {
  log_header "Checking Dependencies"
  check_command kubectl || {
    log_error "kubectl is required for cleanup"
    exit 1
  }
  log_success "Dependencies available"
}
# Confirm the current kubeconfig points at a reachable cluster and report
# which context is in use. Exits the script on failure.
check_cluster_connectivity() {
  log_header "Checking Cluster Connectivity"
  kubectl cluster-info &> /dev/null || {
    log_error "Cannot connect to Kubernetes cluster"
    exit 1
  }
  local ctx
  ctx=$(kubectl config current-context)
  log_success "Connected to cluster (context: $ctx)"
}
# =============================================================================
# Reloader Test Resources Cleanup
# =============================================================================
# Remove namespaces and cluster-scoped RBAC created by the Reloader e2e
# test suites. Matching is by the "reloader-" name substring; all deletions
# are best-effort so cleanup continues under `set -e`.
cleanup_reloader_resources() {
  log_header "Cleaning Up Reloader Test Resources"
  # Delete test namespaces (created by test suites).
  # `|| true` keeps the pipeline from aborting when grep matches nothing.
  log_info "Deleting test namespaces..."
  local namespaces
  namespaces=$(kubectl get namespaces -o name 2>/dev/null | grep "reloader-" | cut -d/ -f2 || true)
  if [[ -n "$namespaces" ]]; then
    for ns in $namespaces; do
      log_info " Deleting namespace: $ns"
      # --wait=false: fire-and-forget so a stuck namespace cannot hang cleanup.
      kubectl delete namespace "$ns" --ignore-not-found --wait=false 2>/dev/null || true
    done
  else
    log_info " No test namespaces found"
  fi
  # Cluster roles are not namespaced, so they must be swept by name explicitly.
  log_info "Deleting cluster roles..."
  local clusterroles
  clusterroles=$(kubectl get clusterrole -o name 2>/dev/null | grep "reloader-" | cut -d/ -f2 || true)
  for cr in $clusterroles; do
    log_info " Deleting ClusterRole: $cr"
    safe_delete clusterrole "$cr"
  done
  # Same for cluster role bindings.
  log_info "Deleting cluster role bindings..."
  local clusterrolebindings
  clusterrolebindings=$(kubectl get clusterrolebinding -o name 2>/dev/null | grep "reloader-" | cut -d/ -f2 || true)
  for crb in $clusterrolebindings; do
    log_info " Deleting ClusterRoleBinding: $crb"
    safe_delete clusterrolebinding "$crb"
  done
  log_success "Reloader test resources cleaned up"
}
# =============================================================================
# Vault Cleanup
# =============================================================================
# Uninstall Vault: Helm release first (when helm and the release exist),
# then the namespace. Safe to run when Vault was never installed.
cleanup_vault() {
  log_header "Uninstalling Vault"
  # Check if Vault is installed; nothing to do when the namespace is absent.
  if ! kubectl get namespace "$VAULT_NAMESPACE" &> /dev/null; then
    log_info "Vault namespace not found, skipping"
    return 0
  fi
  # Uninstall via Helm if available; skip silently when helm is missing
  # or no "vault" release is listed in the namespace.
  if command -v helm &> /dev/null; then
    if helm list -n "$VAULT_NAMESPACE" 2>/dev/null | grep -q vault; then
      log_info "Uninstalling Vault via Helm..."
      helm uninstall vault -n "$VAULT_NAMESPACE" --wait --timeout 60s 2>/dev/null || true
    fi
  fi
  # Delete the namespace to sweep up anything the Helm uninstall left behind.
  log_info "Deleting Vault namespace..."
  safe_delete namespace "$VAULT_NAMESPACE" --timeout=60s
  log_success "Vault cleaned up"
}
# =============================================================================
# CSI Secrets Store Driver Cleanup
# =============================================================================
# Uninstall the CSI Secrets Store Driver: custom resources first, then the
# driver (Helm release or raw manifest), then its CRDs. All steps are
# best-effort so partial installs do not break cleanup.
cleanup_csi_driver() {
  log_header "Uninstalling CSI Secrets Store Driver"
  # Delete all SecretProviderClass resources first, before the driver and
  # CRDs are removed.
  log_info "Deleting SecretProviderClass resources..."
  kubectl delete secretproviderclasses.secrets-store.csi.x-k8s.io \
    --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true
  log_info "Deleting SecretProviderClassPodStatus resources..."
  kubectl delete secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io \
    --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true
  # Uninstall via Helm if available (the setup script installs via Helm);
  # otherwise fall back to deleting the pinned upstream manifest.
  if command -v helm &> /dev/null; then
    if helm list -n "$CSI_NAMESPACE" 2>/dev/null | grep -q csi-secrets-store; then
      log_info "Uninstalling CSI Secrets Store Driver via Helm..."
      helm uninstall csi-secrets-store -n "$CSI_NAMESPACE" --wait --timeout 60s 2>/dev/null || true
    fi
  else
    # Fallback to kubectl delete against the upstream install manifest.
    log_info "Deleting CSI Secrets Store Driver resources via kubectl..."
    local csi_url="https://raw.githubusercontent.com/kubernetes-sigs/secrets-store-csi-driver/v${CSI_DRIVER_VERSION}/deploy/secrets-store-csi-driver.yaml"
    kubectl delete -f "$csi_url" --ignore-not-found --timeout=60s 2>/dev/null || true
  fi
  # CRDs are cluster-scoped; remove them explicitly as the last step.
  log_info "Deleting CSI Secrets Store CRDs..."
  local csi_crds="secretproviderclasses.secrets-store.csi.x-k8s.io secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io"
  for crd in $csi_crds; do
    safe_delete crd "$crd" --timeout=30s
  done
  log_success "CSI Secrets Store Driver cleaned up"
}
# =============================================================================
# Argo Rollouts Cleanup
# =============================================================================
# Uninstall Argo Rollouts: controller, custom resources, install manifest,
# CRDs, namespace, and cluster RBAC — in that order. Safe to run when Argo
# Rollouts was never installed.
cleanup_argo_rollouts() {
  log_header "Uninstalling Argo Rollouts"
  # Check if Argo Rollouts is installed; skip entirely when absent.
  if ! kubectl get namespace "$ARGO_ROLLOUTS_NAMESPACE" &> /dev/null; then
    log_info "Argo Rollouts namespace not found, skipping"
    return 0
  fi
  # Stop the controller first, before its custom resources are deleted.
  log_info "Stopping Argo Rollouts controller..."
  safe_delete deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" --timeout=30s
  # Delete all Argo Rollouts custom resources to avoid finalizer issues.
  log_info "Deleting Argo Rollouts custom resources..."
  local argo_resources="rollouts analysisruns analysistemplates experiments"
  for res in $argo_resources; do
    kubectl delete "${res}.argoproj.io" --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true
  done
  # Delete using the same pinned install manifest the setup script applies.
  log_info "Deleting Argo Rollouts installation..."
  local argo_url="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml"
  kubectl delete -f "$argo_url" --ignore-not-found --timeout=60s 2>/dev/null || true
  # Give resources time to be cleaned up before removing the CRDs.
  sleep 2
  # Delete CRDs (cluster-scoped, so not covered by the namespace delete).
  log_info "Deleting Argo Rollouts CRDs..."
  local argo_crds="rollouts.argoproj.io analysisruns.argoproj.io analysistemplates.argoproj.io clusteranalysistemplates.argoproj.io experiments.argoproj.io"
  for crd in $argo_crds; do
    safe_delete crd "$crd" --timeout=30s
  done
  # Delete namespace.
  log_info "Deleting Argo Rollouts namespace..."
  safe_delete namespace "$ARGO_ROLLOUTS_NAMESPACE" --timeout=30s
  # Delete cluster-scoped RBAC installed alongside the controller.
  log_info "Deleting Argo Rollouts cluster RBAC..."
  safe_delete clusterrole argo-rollouts argo-rollouts-aggregate-to-admin argo-rollouts-aggregate-to-edit argo-rollouts-aggregate-to-view
  safe_delete clusterrolebinding argo-rollouts
  log_success "Argo Rollouts cleaned up"
}
# =============================================================================
# Main
# =============================================================================
# Entry point: verify prerequisites, then tear components down in reverse
# dependency order and print a summary of what was removed.
main() {
  echo "=== E2E Cluster Cleanup ==="
  # Pre-flight checks
  check_dependencies
  check_cluster_connectivity
  # Cleanup in reverse dependency order
  # 1. First cleanup test resources (they depend on everything else)
  cleanup_reloader_resources
  # 2. Then Vault (depends on CSI driver)
  cleanup_vault
  # 3. Then CSI driver
  cleanup_csi_driver
  # 4. Finally Argo Rollouts (independent)
  cleanup_argo_rollouts
  # Summary
  log_header "E2E Cluster Cleanup Complete"
  echo ""
  echo "Removed components:"
  echo " ✓ Reloader test namespaces and cluster resources"
  echo " ✓ Vault"
  echo " ✓ CSI Secrets Store Driver"
  echo " ✓ Argo Rollouts"
}
main "$@"

351
scripts/e2e-cluster-setup.sh Executable file
View File

@@ -0,0 +1,351 @@
#!/bin/bash
# Setup script for e2e test cluster
# Run this before running e2e tests: ./scripts/e2e-cluster-setup.sh
#
# This installs:
# - Argo Rollouts (for Rollout workload testing)
# - CSI Secrets Store Driver (for SecretProviderClass testing)
# - Vault with CSI Provider (as the secrets backend for CSI)
#
# All versions are pinned for reproducibility and can be overridden via environment variables.
set -euo pipefail
# =============================================================================
# Configuration (all versions pinned for reproducibility)
# =============================================================================
# Versions may be overridden via environment variables; namespaces are fixed.
# All are marked readonly so a later accidental assignment fails loudly.
# Argo Rollouts
ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}"
readonly ARGO_ROLLOUTS_VERSION
readonly ARGO_ROLLOUTS_NAMESPACE="argo-rollouts"
# CSI Secrets Store Driver
CSI_DRIVER_VERSION="${CSI_DRIVER_VERSION:-1.5.5}"
readonly CSI_DRIVER_VERSION
readonly CSI_NAMESPACE="kube-system"
# Vault (HashiCorp)
VAULT_CHART_VERSION="${VAULT_CHART_VERSION:-0.31.0}"
VAULT_VERSION="${VAULT_VERSION:-1.20.4}"
VAULT_CSI_PROVIDER_VERSION="${VAULT_CSI_PROVIDER_VERSION:-1.7.0}"
readonly VAULT_CHART_VERSION VAULT_VERSION VAULT_CSI_PROVIDER_VERSION
readonly VAULT_NAMESPACE="vault"
# =============================================================================
# Helper Functions
# =============================================================================
# Print a blank line followed by a "=== title ===" section banner.
log_header() {
  printf '\n=== %s ===\n' "$1"
}
# Print an informational message to stdout.
# printf is used instead of echo so arguments that look like echo flags
# (e.g. "-n", "-e") are printed literally instead of being swallowed.
log_info() {
  printf '%s\n' "$1"
}
# Print a success message to stdout.
log_success() {
  printf '%s\n' "$1"
}
# Print a warning message to stdout.
log_warning() {
  printf '%s\n' "$1"
}
# Print an error message to stderr (keeps stdout clean for data/pipelines).
log_error() {
  printf '%s\n' "$1" >&2
}
# Return 0 when the named executable is available on PATH; otherwise log
# an error to stderr and return 1.
check_command() {
  if command -v "$1" > /dev/null 2>&1; then
    return 0
  fi
  log_error "$1 is not installed or not in PATH"
  return 1
}
# Block until the given workload finishes rolling out.
# $1 resource type (e.g. daemonset), $2 name, $3 namespace,
# $4 optional timeout (default 180s).
wait_for_rollout() {
  local res_type=$1 res_name=$2 ns=$3
  local timeout=${4:-180s}
  kubectl rollout status "${res_type}/${res_name}" -n "$ns" --timeout="$timeout"
}
# Wait until a resource reports the given condition.
# $1 condition name, $2 resource (kind/name), $3 optional namespace
# (empty for cluster-scoped resources such as CRDs), $4 optional timeout
# (default 60s).
wait_for_condition() {
  local cond=$1 res=$2
  local ns=${3:-} timeout=${4:-60s}
  local -a cmd=(kubectl wait --for="condition=$cond" "$res" --timeout="$timeout")
  if [[ -n "$ns" ]]; then
    cmd+=(-n "$ns")
  fi
  "${cmd[@]}"
}
# =============================================================================
# Dependency Checks
# =============================================================================
# Verify all tools needed for setup are present (kubectl and helm);
# report every missing one at once, then exit.
check_dependencies() {
  log_header "Checking Dependencies"
  local -a missing=()
  # Required: kubectl
  check_command kubectl || missing+=("kubectl")
  # Required: helm (for CSI driver and Vault installation)
  check_command helm || missing+=("helm")
  if (( ${#missing[@]} > 0 )); then
    log_error "Missing required dependencies: ${missing[*]}"
    log_error "Please install the missing tools and try again."
    exit 1
  fi
  log_success "All required dependencies are available"
}
# Confirm the current kubeconfig points at a reachable cluster and report
# which context is in use. Exits the script on failure.
check_cluster_connectivity() {
  log_header "Checking Cluster Connectivity"
  kubectl cluster-info &> /dev/null || {
    log_error "Cannot connect to Kubernetes cluster"
    log_error "Please ensure your kubeconfig is correctly configured"
    exit 1
  }
  local ctx
  ctx=$(kubectl config current-context)
  log_success "Connected to cluster (context: $ctx)"
}
# =============================================================================
# Argo Rollouts Installation
# =============================================================================
# Install the pinned Argo Rollouts release. Idempotent: skips when both the
# Rollout CRD and the controller deployment already exist.
install_argo_rollouts() {
  log_header "Installing Argo Rollouts ${ARGO_ROLLOUTS_VERSION}"
  # Check if already installed (CRD alone is not enough — the controller
  # deployment must exist too).
  if kubectl get crd rollouts.argoproj.io &> /dev/null; then
    if kubectl get deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" &> /dev/null; then
      log_success "Argo Rollouts is already installed"
      return 0
    fi
    log_info "Argo Rollouts CRD exists but controller not running, reinstalling..."
  fi
  # Create namespace ("already exists" errors are ignored).
  kubectl create namespace "$ARGO_ROLLOUTS_NAMESPACE" 2>/dev/null || true
  # Install from official manifest, pinned to ARGO_ROLLOUTS_VERSION.
  local argo_url="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml"
  log_info "Applying manifest from: $argo_url"
  kubectl apply -n "$ARGO_ROLLOUTS_NAMESPACE" -f "$argo_url"
  # Wait for deployment to be created before patching it.
  sleep 2
  # Patch deployment to remove resource requirements (for Kind cluster
  # compatibility): try a JSON-patch removal first; if the "resources" path
  # is absent that patch fails, so fall back to a strategic merge that nulls
  # the limits/requests fields.
  log_info "Patching deployment for Kind compatibility..."
  local patch_json='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]'
  if ! kubectl patch deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" --type=json -p "$patch_json" 2>/dev/null; then
    patch_json='{"spec":{"template":{"spec":{"containers":[{"name":"argo-rollouts","resources":{"limits":null,"requests":null}}]}}}}'
    kubectl patch deployment argo-rollouts -n "$ARGO_ROLLOUTS_NAMESPACE" --type=strategic -p "$patch_json" 2>/dev/null || true
  fi
  # Wait for the controller to be available and the CRD to be established.
  log_info "Waiting for Argo Rollouts controller..."
  wait_for_condition "available" "deployment/argo-rollouts" "$ARGO_ROLLOUTS_NAMESPACE" "180s"
  wait_for_condition "established" "crd/rollouts.argoproj.io" "" "60s"
  log_success "Argo Rollouts ${ARGO_ROLLOUTS_VERSION} installed"
}
# =============================================================================
# CSI Secrets Store Driver Installation
# =============================================================================
# Install the pinned CSI Secrets Store Driver via Helm, with secret sync and
# fast rotation enabled for the e2e tests. Idempotent: skips when both the
# SecretProviderClass CRD and the driver DaemonSet already exist.
install_csi_driver() {
  log_header "Installing CSI Secrets Store Driver ${CSI_DRIVER_VERSION}"
  # Check if already installed.
  if kubectl get crd secretproviderclasses.secrets-store.csi.x-k8s.io &> /dev/null; then
    # Fix: `kubectl get` with a label selector exits 0 even when nothing
    # matches, so the previous `if kubectl get daemonset -l ... &>/dev/null`
    # check succeeded whenever the CRD existed. Require actual output instead.
    local existing_ds
    existing_ds=$(kubectl get daemonset -n "$CSI_NAMESPACE" -l app=secrets-store-csi-driver -o name 2>/dev/null || true)
    if [[ -n "$existing_ds" ]]; then
      log_success "CSI Secrets Store Driver is already installed"
      return 0
    fi
    log_info "CSI Driver CRD exists but DaemonSet not found, installing..."
  fi
  # Add Helm repo ("already exists" errors are harmless) and refresh it.
  helm repo add secrets-store-csi-driver https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts 2>/dev/null || true
  helm repo update secrets-store-csi-driver
  # Install via Helm with pinned version.
  log_info "Installing via Helm (version ${CSI_DRIVER_VERSION})..."
  helm upgrade --install csi-secrets-store secrets-store-csi-driver/secrets-store-csi-driver \
    --namespace "$CSI_NAMESPACE" \
    --version "$CSI_DRIVER_VERSION" \
    --set syncSecret.enabled=true \
    --set enableSecretRotation=true \
    --set rotationPollInterval=2s \
    --wait \
    --timeout 180s
  # Wait for CRDs to be established before anything applies a SecretProviderClass.
  log_info "Waiting for CRDs to be established..."
  wait_for_condition "established" "crd/secretproviderclasses.secrets-store.csi.x-k8s.io" "" "60s"
  wait_for_condition "established" "crd/secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io" "" "60s"
  # Wait for DaemonSet to be ready (try different names as they vary by
  # installation method).
  log_info "Waiting for CSI driver pods..."
  kubectl rollout status daemonset/csi-secrets-store-secrets-store-csi-driver -n "$CSI_NAMESPACE" --timeout=180s 2>/dev/null || \
    kubectl rollout status daemonset/secrets-store-csi-driver -n "$CSI_NAMESPACE" --timeout=180s 2>/dev/null || \
    log_warning "Could not verify DaemonSet status (name may vary)"
  log_success "CSI Secrets Store Driver ${CSI_DRIVER_VERSION} installed"
}
# =============================================================================
# Vault Installation
# =============================================================================
# Install Vault in dev mode (plus its CSI provider) via the pinned Helm
# chart. Idempotent: skips when a vault pod is already Running.
install_vault() {
  log_header "Installing Vault ${VAULT_VERSION} (Chart ${VAULT_CHART_VERSION})"
  # Check if already installed: any Running pod with the vault label counts.
  if kubectl get pods -n "$VAULT_NAMESPACE" -l app.kubernetes.io/name=vault 2>/dev/null | grep -q Running; then
    log_success "Vault is already installed and running"
    return 0
  fi
  # Add Helm repo ("already exists" errors are harmless) and refresh it.
  helm repo add hashicorp https://helm.releases.hashicorp.com 2>/dev/null || true
  helm repo update hashicorp
  # Install Vault in dev mode with CSI provider.
  # Dev mode: single server, in-memory storage, pre-unsealed, root token = "root".
  # Small resource requests/limits keep it schedulable on a Kind node.
  log_info "Installing Vault via Helm..."
  helm upgrade --install vault hashicorp/vault \
    --namespace "$VAULT_NAMESPACE" \
    --create-namespace \
    --version "$VAULT_CHART_VERSION" \
    --set "server.image.tag=${VAULT_VERSION}" \
    --set "server.dev.enabled=true" \
    --set "server.dev.devRootToken=root" \
    --set "server.resources.requests.memory=64Mi" \
    --set "server.resources.requests.cpu=50m" \
    --set "server.resources.limits.memory=128Mi" \
    --set "server.resources.limits.cpu=100m" \
    --set "injector.enabled=false" \
    --set "csi.enabled=true" \
    --set "csi.image.tag=${VAULT_CSI_PROVIDER_VERSION}" \
    --set "csi.resources.requests.memory=64Mi" \
    --set "csi.resources.requests.cpu=50m" \
    --set "csi.resources.limits.memory=128Mi" \
    --set "csi.resources.limits.cpu=100m" \
    --wait \
    --timeout 180s
  # Wait for the server pod and the CSI provider DaemonSet to be ready.
  log_info "Waiting for Vault pod..."
  kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=vault -n "$VAULT_NAMESPACE" --timeout=120s
  log_info "Waiting for Vault CSI provider..."
  wait_for_rollout "daemonset" "vault-csi-provider" "$VAULT_NAMESPACE" "120s"
  log_success "Vault ${VAULT_VERSION} installed"
}
# Configure the running vault-0 pod for the e2e tests: KV v2 secrets engine,
# fixture secrets, Kubernetes auth, a read-only policy, and a role any
# service account can use. All commands run inside the pod via kubectl exec.
configure_vault() {
  log_header "Configuring Vault for Kubernetes Authentication"
  # Enable KV secrets engine (ignore error if already enabled - dev mode has it by default)
  log_info "Enabling KV secrets engine..."
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault secrets enable -path=secret kv-v2 2>/dev/null || true
  # Create test secrets for e2e tests
  log_info "Creating test secrets..."
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/test username="test-user" password="test-password"
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/app1 api_key="app1-api-key-v1" db_password="app1-db-pass-v1"
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/app2 api_key="app2-api-key-v1" db_password="app2-db-pass-v1"
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault kv put secret/rotation-test value="initial-value-v1"
  # Enable Kubernetes auth method (ignore error if already enabled)
  log_info "Enabling Kubernetes auth..."
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault auth enable kubernetes 2>/dev/null || true
  # Configure Kubernetes auth to use in-cluster config. The command is
  # single-quoted so $KUBERNETES_PORT_443_TCP_ADDR expands inside the pod,
  # not on the machine running this script.
  log_info "Configuring Kubernetes auth..."
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- sh -c \
    'vault write auth/kubernetes/config kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443"'
  # Create policy granting read access to all test secrets. The heredoc is
  # part of the single-quoted remote command, so it must stay unindented.
  log_info "Creating Vault policy..."
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- sh -c 'vault policy write test-policy - <<EOF
path "secret/data/*" {
capabilities = ["read"]
}
EOF'
  # Create role that binds to any service account (for e2e tests)
  log_info "Creating Vault role..."
  kubectl exec -n "$VAULT_NAMESPACE" vault-0 -- vault write auth/kubernetes/role/test-role \
    bound_service_account_names="*" \
    bound_service_account_namespaces="*" \
    policies=test-policy \
    ttl=1h
  log_success "Vault configured for CSI testing"
}
# =============================================================================
# Main
# =============================================================================
# Entry point: print the pinned versions, verify prerequisites, install each
# component in dependency order, and print follow-up instructions.
main() {
  echo "=== E2E Cluster Setup ==="
  echo ""
  echo "Versions:"
  echo " Argo Rollouts: ${ARGO_ROLLOUTS_VERSION}"
  echo " CSI Driver: ${CSI_DRIVER_VERSION}"
  echo " Vault Chart: ${VAULT_CHART_VERSION}"
  echo " Vault Server: ${VAULT_VERSION}"
  echo " Vault CSI Provider: ${VAULT_CSI_PROVIDER_VERSION}"
  # Pre-flight checks
  check_dependencies
  check_cluster_connectivity
  # Install components in dependency order
  install_argo_rollouts
  install_csi_driver
  install_vault
  configure_vault
  # Summary
  log_header "E2E Cluster Setup Complete"
  echo ""
  echo "Installed components:"
  echo " ✓ Argo Rollouts ${ARGO_ROLLOUTS_VERSION}"
  echo " ✓ CSI Secrets Store Driver ${CSI_DRIVER_VERSION}"
  echo " ✓ Vault ${VAULT_VERSION} (CSI Provider ${VAULT_CSI_PROVIDER_VERSION})"
  echo ""
  echo "Vault is running in dev mode with root token: root"
  echo "Test secrets created at: secret/test, secret/app1, secret/app2, secret/rotation-test"
  echo ""
  echo "You can now run e2e tests:"
  echo " make e2e"
  echo " # or"
  echo " SKIP_BUILD=true RELOADER_IMAGE=ghcr.io/stakater/reloader:test go test -v ./test/e2e/..."
}
main "$@"