mirror of https://github.com/stakater/Reloader.git
synced 2026-02-14 18:09:50 +00:00
chore: A lot of cleanup
.github/workflows/loadtest.yml
@@ -10,7 +10,6 @@ permissions:
  issues: write

jobs:
  # Full load test suite triggered by /loadtest command
  loadtest:
    # Only run on PR comments with /loadtest command
    if: |
@@ -110,7 +110,6 @@ func NewController(

// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
    // Record event received
    c.collectors.RecordEventReceived("add", c.resource)

    switch object := obj.(type) {
@@ -127,7 +126,7 @@ func (c *Controller) Add(obj interface{}) {
            Resource: obj,
            Collectors: c.collectors,
            Recorder: c.recorder,
-           EnqueueTime: time.Now(), // Track when item was enqueued
+           EnqueueTime: time.Now(),
        })
    } else {
        c.collectors.RecordSkipped("ignored_or_not_selected")
@@ -186,7 +185,6 @@ func (c *Controller) removeSelectedNamespaceFromCache(namespace v1.Namespace) {

// Update function to add an old object and a new object to the queue in case of updating a resource
func (c *Controller) Update(old interface{}, new interface{}) {
    // Record event received
    c.collectors.RecordEventReceived("update", c.resource)

    switch new.(type) {
@@ -200,7 +198,7 @@ func (c *Controller) Update(old interface{}, new interface{}) {
            OldResource: old,
            Collectors: c.collectors,
            Recorder: c.recorder,
-           EnqueueTime: time.Now(), // Track when item was enqueued
+           EnqueueTime: time.Now(),
        })
    } else {
        c.collectors.RecordSkipped("ignored_or_not_selected")
@@ -209,7 +207,6 @@ func (c *Controller) Update(old interface{}, new interface{}) {

// Delete function to add an object to the queue in case of deleting a resource
func (c *Controller) Delete(old interface{}) {
    // Record event received
    c.collectors.RecordEventReceived("delete", c.resource)

    if _, ok := old.(*csiv1.SecretProviderClassPodStatus); ok {
@@ -222,7 +219,7 @@ func (c *Controller) Delete(old interface{}) {
            Resource: old,
            Collectors: c.collectors,
            Recorder: c.recorder,
-           EnqueueTime: time.Now(), // Track when item was enqueued
+           EnqueueTime: time.Now(),
        })
    } else {
        c.collectors.RecordSkipped("ignored_or_not_selected")
@@ -285,7 +282,6 @@ func (c *Controller) processNextItem() bool {
        return false
    }

    // Update queue depth after getting item
    c.collectors.SetQueueDepth(c.queue.Len())

    // Tell the queue that we are done with processing this key. This unblocks the key for other workers
@@ -307,7 +303,6 @@ func (c *Controller) processNextItem() bool {

    duration := time.Since(startTime)

    // Record reconcile metrics
    if err != nil {
        c.collectors.RecordReconcile("error", duration)
    } else {
@@ -355,7 +350,6 @@ func (c *Controller) handleErr(err error, key interface{}) {
-       logrus.Errorf("Dropping key out of the queue: %v", err)
+       logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)

        // Record failed event processing
        c.collectors.RecordEventProcessed("unknown", c.resource, "dropped")
    }
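The EnqueueTime stamped on each queue item above is what lets the processing side measure how long an item waited in the workqueue. A minimal, self-contained sketch of that pattern (the QueueItem shape and the metric name here are illustrative stand-ins, not the controller's actual types):

package example

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// QueueItem mirrors the idea above: the payload plus the moment it was enqueued.
type QueueItem struct {
    Resource    interface{}
    EnqueueTime time.Time
}

// queueWait is a hypothetical histogram for queue latency.
var queueWait = prometheus.NewHistogram(prometheus.HistogramOpts{
    Namespace: "reloader",
    Name:      "queue_wait_seconds_example",
    Help:      "Time items spent waiting in the workqueue (illustrative).",
})

// observeDequeue records the queue wait at the moment a worker picks the item up.
func observeDequeue(item QueueItem) {
    queueWait.Observe(time.Since(item.EnqueueTime).Seconds())
}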
@@ -219,8 +219,6 @@ func NewCollectors() Collectors {
        []string{"success", "namespace"},
    )

    // === NEW: Comprehensive metrics ===

    reconcileTotal := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "reloader",
@@ -372,7 +370,6 @@ func NewCollectors() Collectors {
func SetupPrometheusEndpoint() Collectors {
    collectors := NewCollectors()

    // Register all metrics
    prometheus.MustRegister(collectors.Reloaded)
    prometheus.MustRegister(collectors.ReconcileTotal)
    prometheus.MustRegister(collectors.ReconcileDuration)
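For reference, the client_golang pattern used by these collectors (build a CounterVec, register it once, then increment it by label values) looks like this in isolation; the metric name below is a hypothetical example, not one of the collectors defined in this file:

package example

import "github.com/prometheus/client_golang/prometheus"

var reconcileExample = prometheus.NewCounterVec(
    prometheus.CounterOpts{
        Namespace: "reloader",
        Name:      "reconcile_total_example",
        Help:      "Total reconcile attempts by result (illustrative).",
    },
    []string{"result"},
)

func init() {
    // MustRegister panics on duplicate registration, so each collector is registered exactly once.
    prometheus.MustRegister(reconcileExample)
}

// recordReconcile bumps the counter for one reconcile outcome, e.g. "success" or "error".
func recordReconcile(result string) {
    reconcileExample.WithLabelValues(result).Inc()
}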
@@ -1,4 +1,3 @@
// Package main is the entrypoint for the load test CLI.
package main

import "github.com/stakater/Reloader/test/loadtest/internal/cmd"
@@ -31,16 +31,13 @@ func NewManager(cfg Config) *Manager {
// DetectContainerRuntime finds available container runtime.
// It checks if the runtime daemon is actually running, not just if the binary exists.
func DetectContainerRuntime() (string, error) {
    // Prefer docker as it's more commonly used with kind
    if _, err := exec.LookPath("docker"); err == nil {
        // Verify docker daemon is running
        cmd := exec.Command("docker", "info")
        if err := cmd.Run(); err == nil {
            return "docker", nil
        }
    }
    if _, err := exec.LookPath("podman"); err == nil {
        // Verify podman is functional (check if we can run a basic command)
        cmd := exec.Command("podman", "info")
        if err := cmd.Run(); err == nil {
            return "podman", nil
@@ -85,7 +82,6 @@ func (m *Manager) Create(ctx context.Context) error {
        }
    }

    // Calculate unique ports based on offset (for parallel clusters)
    httpPort := 8080 + m.cfg.PortOffset
    httpsPort := 8443 + m.cfg.PortOffset

@@ -231,7 +227,6 @@ func (m *Manager) Name() string {

// LoadImage loads a container image into the kind cluster.
func (m *Manager) LoadImage(ctx context.Context, image string) error {
    // First check if image exists locally
    if !m.imageExistsLocally(image) {
        fmt.Printf(" Image not found locally, pulling: %s\n", image)
        pullCmd := exec.CommandContext(ctx, m.cfg.ContainerRuntime, "pull", image)
@@ -247,7 +242,6 @@ func (m *Manager) LoadImage(ctx context.Context, image string) error {
    fmt.Printf(" Copying image to kind cluster...\n")

    if m.cfg.ContainerRuntime == "podman" {
        // For podman, save to archive and load
        tmpFile := fmt.Sprintf("/tmp/kind-image-%d.tar", time.Now().UnixNano())
        defer os.Remove(tmpFile)

@@ -276,19 +270,16 @@ func (m *Manager) LoadImage(ctx context.Context, image string) error {

// imageExistsLocally checks if an image exists in the local container runtime.
func (m *Manager) imageExistsLocally(image string) bool {
    // Try "image exists" command (works for podman)
    cmd := exec.Command(m.cfg.ContainerRuntime, "image", "exists", image)
    if err := cmd.Run(); err == nil {
        return true
    }

    // Try "image inspect" (works for both docker and podman)
    cmd = exec.Command(m.cfg.ContainerRuntime, "image", "inspect", image)
    if err := cmd.Run(); err == nil {
        return true
    }

    // Try listing images and grep
    cmd = exec.Command(m.cfg.ContainerRuntime, "images", "--format", "{{.Repository}}:{{.Tag}}")
    out, err := cmd.Output()
    if err == nil {
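The podman branch referenced above (save the image to a tar archive, then load the archive into kind) is only partially visible in this hunk. A standalone sketch of that flow, assuming the stock `podman save -o` and `kind load image-archive` commands rather than the repository's exact implementation:

package example

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "time"
)

// loadViaArchive copies an image into a kind cluster by way of a tar archive,
// which is the usual route when the local runtime is podman rather than docker.
func loadViaArchive(ctx context.Context, image, clusterName string) error {
    tmpFile := fmt.Sprintf("/tmp/kind-image-%d.tar", time.Now().UnixNano())
    defer os.Remove(tmpFile)

    // podman save -o <tar> <image>
    if err := exec.CommandContext(ctx, "podman", "save", "-o", tmpFile, image).Run(); err != nil {
        return fmt.Errorf("saving image: %w", err)
    }
    // kind load image-archive <tar> --name <cluster>
    if err := exec.CommandContext(ctx, "kind", "load", "image-archive", tmpFile, "--name", clusterName).Run(); err != nil {
        return fmt.Errorf("loading image into kind: %w", err)
    }
    return nil
}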
@@ -1,4 +1,3 @@
// Package cmd implements the CLI commands for the load test tool.
package cmd

import (
@@ -21,14 +21,14 @@ type Manager struct {
    manifestPath string
    portForward *exec.Cmd
    localPort int
-   kubeContext string // Optional: use specific kubeconfig context
+   kubeContext string
}

// NewManager creates a new Prometheus manager.
func NewManager(manifestPath string) *Manager {
    return &Manager{
        manifestPath: manifestPath,
-       localPort: 9091, // Use 9091 to avoid conflicts
+       localPort: 9091,
    }
}
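The kubectl helper named in the next hunk header builds the argument list for every kubectl call; its body is not part of this diff, but the usual shape of such a helper, which prepends --context only when a context was configured, would be something like the following (an assumption, not the repository's code):

package example

// kubectlArgs is a sketch of how an optional kubeconfig context can be folded
// into every kubectl invocation; kubeContext may be empty.
func kubectlArgs(kubeContext string, args ...string) []string {
    if kubeContext == "" {
        return args
    }
    return append([]string{"--context", kubeContext}, args...)
}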
@@ -51,7 +51,6 @@ func (m *Manager) kubectl(args ...string) []string {

// Deploy deploys Prometheus to the cluster.
func (m *Manager) Deploy(ctx context.Context) error {
    // Create namespace
    cmd := exec.CommandContext(ctx, "kubectl", m.kubectl("create", "namespace", "monitoring", "--dry-run=client", "-o", "yaml")...)
    out, err := cmd.Output()
    if err != nil {
@@ -64,7 +63,6 @@ func (m *Manager) Deploy(ctx context.Context) error {
        return fmt.Errorf("applying namespace: %w", err)
    }

    // Apply Prometheus manifest
    applyCmd = exec.CommandContext(ctx, "kubectl", m.kubectl("apply", "-f", m.manifestPath)...)
    applyCmd.Stdout = os.Stdout
    applyCmd.Stderr = os.Stderr
@@ -72,7 +70,6 @@ func (m *Manager) Deploy(ctx context.Context) error {
        return fmt.Errorf("applying prometheus manifest: %w", err)
    }

    // Wait for Prometheus to be ready
    fmt.Println("Waiting for Prometheus to be ready...")
    waitCmd := exec.CommandContext(ctx, "kubectl", m.kubectl("wait", "--for=condition=ready", "pod",
        "-l", "app=prometheus", "-n", "monitoring", "--timeout=120s")...)
@@ -89,7 +86,6 @@ func (m *Manager) Deploy(ctx context.Context) error {
func (m *Manager) StartPortForward(ctx context.Context) error {
    m.StopPortForward()

    // Start port-forward
    m.portForward = exec.CommandContext(ctx, "kubectl", m.kubectl("port-forward",
        "-n", "monitoring", "svc/prometheus", fmt.Sprintf("%d:9090", m.localPort))...)

@@ -97,7 +93,6 @@ func (m *Manager) StartPortForward(ctx context.Context) error {
        return fmt.Errorf("starting port-forward: %w", err)
    }

    // Wait for port-forward to be ready
    for i := 0; i < 30; i++ {
        time.Sleep(time.Second)
        if m.isAccessible() {
@@ -115,7 +110,6 @@ func (m *Manager) StopPortForward() {
        m.portForward.Process.Kill()
        m.portForward = nil
    }
    // Also kill any lingering port-forwards
    exec.Command("pkill", "-f", fmt.Sprintf("kubectl port-forward.*prometheus.*%d", m.localPort)).Run()
}
@@ -123,12 +117,10 @@ func (m *Manager) StopPortForward() {
func (m *Manager) Reset(ctx context.Context) error {
    m.StopPortForward()

    // Delete Prometheus pod to reset metrics
    cmd := exec.CommandContext(ctx, "kubectl", m.kubectl("delete", "pod", "-n", "monitoring",
        "-l", "app=prometheus", "--grace-period=0", "--force")...)
-   cmd.Run() // Ignore errors
+   cmd.Run()

    // Wait for new pod
    fmt.Println("Waiting for Prometheus to restart...")
    waitCmd := exec.CommandContext(ctx, "kubectl", m.kubectl("wait", "--for=condition=ready", "pod",
        "-l", "app=prometheus", "-n", "monitoring", "--timeout=120s")...)
@@ -136,12 +128,10 @@ func (m *Manager) Reset(ctx context.Context) error {
        return fmt.Errorf("waiting for prometheus restart: %w", err)
    }

    // Restart port-forward
    if err := m.StartPortForward(ctx); err != nil {
        return err
    }

    // Wait for scraping to initialize
    fmt.Println("Waiting 5s for Prometheus to initialize scraping...")
    time.Sleep(5 * time.Second)

@@ -155,7 +145,6 @@ func (m *Manager) isAccessible() bool {
    }
    conn.Close()

    // Also try HTTP
    resp, err := http.Get(fmt.Sprintf("http://localhost:%d/api/v1/status/config", m.localPort))
    if err != nil {
        return false
@@ -186,7 +175,6 @@ func (m *Manager) WaitForTarget(ctx context.Context, job string, timeout time.Du
        }
    }

    // Print debug info on timeout
    m.printTargetStatus(job)
    return fmt.Errorf("timeout waiting for Prometheus to scrape job '%s'", job)
}
@@ -338,7 +326,6 @@ func (m *Manager) CollectMetrics(ctx context.Context, job, outputDir, scenario s
    // For S6 (restart scenario), use increase() to handle counter resets
    useIncrease := scenario == "S6"

    // Counter metrics
    counterMetrics := []string{
        "reloader_reconcile_total",
        "reloader_action_total",
@@ -363,7 +350,6 @@ func (m *Manager) CollectMetrics(ctx context.Context, job, outputDir, scenario s
        }
    }

    // Histogram percentiles
    histogramMetrics := []struct {
        name string
        prefix string
@@ -384,7 +370,6 @@ func (m *Manager) CollectMetrics(ctx context.Context, job, outputDir, scenario s
        }
    }

    // REST client metrics
    restQueries := map[string]string{
        "rest_client_requests_total.json": fmt.Sprintf(`sum(rest_client_requests_total{job="%s"})`, job),
        "rest_client_requests_get.json": fmt.Sprintf(`sum(rest_client_requests_total{job="%s",method="GET"})`, job),
@@ -399,30 +384,23 @@ func (m *Manager) CollectMetrics(ctx context.Context, job, outputDir, scenario s
        }
    }

    // Resource consumption metrics (memory, CPU, goroutines)
    resourceQueries := map[string]string{
        // Memory metrics (in bytes)
        "memory_rss_bytes_avg.json": fmt.Sprintf(`avg_over_time(process_resident_memory_bytes{job="%s"}[%s])`, job, timeRange),
        "memory_rss_bytes_max.json": fmt.Sprintf(`max_over_time(process_resident_memory_bytes{job="%s"}[%s])`, job, timeRange),
        "memory_rss_bytes_cur.json": fmt.Sprintf(`process_resident_memory_bytes{job="%s"}`, job),

        // Heap memory (Go runtime)
        "memory_heap_bytes_avg.json": fmt.Sprintf(`avg_over_time(go_memstats_heap_alloc_bytes{job="%s"}[%s])`, job, timeRange),
        "memory_heap_bytes_max.json": fmt.Sprintf(`max_over_time(go_memstats_heap_alloc_bytes{job="%s"}[%s])`, job, timeRange),

        // CPU metrics (rate of CPU seconds used)
        "cpu_usage_cores_avg.json": fmt.Sprintf(`rate(process_cpu_seconds_total{job="%s"}[%s])`, job, timeRange),
        "cpu_usage_cores_max.json": fmt.Sprintf(`max_over_time(rate(process_cpu_seconds_total{job="%s"}[1m])[%s:1m])`, job, timeRange),

        // Goroutines (concurrency indicator)
        "goroutines_avg.json": fmt.Sprintf(`avg_over_time(go_goroutines{job="%s"}[%s])`, job, timeRange),
        "goroutines_max.json": fmt.Sprintf(`max_over_time(go_goroutines{job="%s"}[%s])`, job, timeRange),
        "goroutines_cur.json": fmt.Sprintf(`go_goroutines{job="%s"}`, job),

        // GC metrics
        "gc_duration_seconds_p99.json": fmt.Sprintf(`histogram_quantile(0.99, sum(rate(go_gc_duration_seconds_bucket{job="%s"}[%s])) by (le))`, job, timeRange),

        // Threads
        "threads_cur.json": fmt.Sprintf(`go_threads{job="%s"}`, job),
    }
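Each PromQL string above is evaluated through Prometheus' instant-query endpoint, /api/v1/query. A minimal sketch of issuing one such query over the local port-forward (the port and query are illustrative; for the S6 restart case the expression would use increase() instead, as noted above):

package example

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

// queryPrometheus runs a single instant query against the Prometheus HTTP API
// and returns the raw JSON response body.
func queryPrometheus(localPort int, promql string) (string, error) {
    u := fmt.Sprintf("http://localhost:%d/api/v1/query?query=%s",
        localPort, url.QueryEscape(promql))
    resp, err := http.Get(u)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return "", err
    }
    return string(body), nil
}

// Example call: average RSS of an illustrative "reloader" job over a 5m window.
// result, err := queryPrometheus(9091, `avg_over_time(process_resident_memory_bytes{job="reloader"}[5m])`)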
@@ -438,7 +416,6 @@ func (m *Manager) CollectMetrics(ctx context.Context, job, outputDir, scenario s
func (m *Manager) queryAndSave(ctx context.Context, query, outputPath string) error {
    result, err := m.Query(ctx, query)
    if err != nil {
        // Write empty result on error
        emptyResult := `{"status":"success","data":{"resultType":"vector","result":[]}}`
        return os.WriteFile(outputPath, []byte(emptyResult), 0644)
    }
@@ -197,7 +197,6 @@ func (s *FanOutScenario) Run(ctx context.Context, client kubernetes.Interface, n

    log.Println("S2: Updating shared ConfigMap...")

    // Check context state before starting update loop
    if ctx.Err() != nil {
        log.Printf("S2: WARNING - Context already done before update loop: %v", ctx.Err())
    }
@@ -503,14 +502,7 @@ func (s *WorkloadChurnScenario) Run(ctx context.Context, client kubernetes.Inter
    wg.Wait()
    log.Printf("S5: Created %d, deleted %d deployments, %d CM updates", deployCounter, deleteCounter, cmUpdateCount)

    // S5 does NOT set expected values for action_total/reload_executed_total because:
    // - There are ~10 active deployments at any time (creates new, deletes old)
    // - Each CM update triggers reloads on ALL active deployments
    // - Exact counts depend on timing of creates/deletes vs CM updates
    // - "Not found" errors are expected when a deployment is deleted during processing
    // Instead, S5 pass/fail compares old vs new (both should be similar)
    return ExpectedMetrics{
        // No expected values - churn makes exact counts unpredictable
        Description: fmt.Sprintf("S5: Churn test - %d deploys created, %d deleted, %d CM updates, ~10 active deploys at any time", deployCounter, deleteCounter, cmUpdateCount),
    }, nil
}
@@ -765,8 +757,6 @@ func (s *LargeObjectScenario) Run(ctx context.Context, client kubernetes.Interfa
    }, nil
}

// Helper functions

func waitForDeploymentsReady(ctx context.Context, client kubernetes.Interface, namespace string, timeout time.Duration) error {
    log.Printf("Waiting for all deployments in %s to be ready (timeout: %v)...", namespace, timeout)
@@ -1051,7 +1041,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int

    setupCtx := context.Background()

    // Create Secrets
    for i := 0; i < numSecrets; i++ {
        secret := &corev1.Secret{
            ObjectMeta: metav1.ObjectMeta{
@@ -1067,7 +1056,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
        }
    }

    // Create ConfigMaps
    for i := 0; i < numConfigMaps; i++ {
        cm := &corev1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
@@ -1083,7 +1071,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
        }
    }

    // Create Secret-only deployments
    for i := 0; i < numSecretOnlyDeploys; i++ {
        deploy := createDeploymentWithSecret(
            fmt.Sprintf("secret-only-deploy-%d", i),
@@ -1095,7 +1082,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
        }
    }

    // Create ConfigMap-only deployments
    for i := 0; i < numConfigMapOnlyDeploys; i++ {
        deploy := createDeployment(
            fmt.Sprintf("cm-only-deploy-%d", i),
@@ -1107,7 +1093,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
        }
    }

    // Create mixed deployments (using both Secret and ConfigMap)
    for i := 0; i < numMixedDeploys; i++ {
        deploy := createDeploymentWithBoth(
            fmt.Sprintf("mixed-deploy-%d", i),
@@ -1131,7 +1116,7 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
    ticker := time.NewTicker(500 * time.Millisecond)
    defer ticker.Stop()

-   updateSecret := true // Alternate between Secret and ConfigMap updates
+   updateSecret := true

    endTime := time.Now().Add(duration - 5*time.Second)
    for time.Now().Before(endTime) {
@@ -1140,7 +1125,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
            return s.calculateExpected(secretUpdateCount, cmUpdateCount, numSecrets, numConfigMaps, numSecretOnlyDeploys, numConfigMapOnlyDeploys, numMixedDeploys), nil
        case <-ticker.C:
            if updateSecret {
                // Update a random Secret
                secretIndex := rand.Intn(numSecrets)
                secret, err := client.CoreV1().Secrets(namespace).Get(setupCtx, fmt.Sprintf("mixed-secret-%d", secretIndex), metav1.GetOptions{})
                if err != nil {
@@ -1153,7 +1137,6 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
                    secretUpdateCount++
                }
            } else {
                // Update a random ConfigMap
                cmIndex := rand.Intn(numConfigMaps)
                cm, err := client.CoreV1().ConfigMaps(namespace).Get(setupCtx, fmt.Sprintf("mixed-cm-%d", cmIndex), metav1.GetOptions{})
                if err != nil {
@@ -1173,11 +1156,9 @@ func (s *SecretsAndMixedScenario) Run(ctx context.Context, client kubernetes.Int
}

func (s *SecretsAndMixedScenario) calculateExpected(secretUpdates, cmUpdates, numSecrets, numConfigMaps, secretOnlyDeploys, cmOnlyDeploys, mixedDeploys int) ExpectedMetrics {
    // Average deploys triggered per random secret update
    avgSecretReloads := float64(secretOnlyDeploys)/float64(numSecrets) + float64(mixedDeploys)/float64(numSecrets)
    secretTriggeredReloads := int(float64(secretUpdates) * avgSecretReloads)

    // Average deploys triggered per random CM update
    avgCMReloads := float64(cmOnlyDeploys)/float64(numConfigMaps) + float64(mixedDeploys)/float64(numConfigMaps)
    cmTriggeredReloads := int(float64(cmUpdates) * avgCMReloads)
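Plugging illustrative numbers into the formula makes the expectation concrete: with 10 Secrets, 10 secret-only deployments and 5 mixed deployments, a random Secret update reloads on average 10/10 + 5/10 = 1.5 workloads, so 100 Secret updates predict roughly 150 secret-triggered reloads; the ConfigMap side is computed the same way and the two sums are added. (The counts here are examples, not the scenario's actual configuration.)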
@@ -1208,7 +1189,6 @@ func (s *MultiWorkloadTypeScenario) Run(ctx context.Context, client kubernetes.I

    setupCtx := context.Background()

    // Create shared ConfigMap
    cm := &corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name: "multi-type-cm",
@@ -1220,7 +1200,6 @@ func (s *MultiWorkloadTypeScenario) Run(ctx context.Context, client kubernetes.I
        return ExpectedMetrics{}, fmt.Errorf("failed to create shared ConfigMap: %w", err)
    }

    // Create Deployments
    for i := 0; i < numDeployments; i++ {
        deploy := createDeployment(fmt.Sprintf("multi-deploy-%d", i), namespace, "multi-type-cm")
        if _, err := client.AppsV1().Deployments(namespace).Create(setupCtx, deploy, metav1.CreateOptions{}); err != nil {
@@ -1228,7 +1207,6 @@ func (s *MultiWorkloadTypeScenario) Run(ctx context.Context, client kubernetes.I
        }
    }

    // Create StatefulSets
    for i := 0; i < numStatefulSets; i++ {
        sts := createStatefulSet(fmt.Sprintf("multi-sts-%d", i), namespace, "multi-type-cm")
        if _, err := client.AppsV1().StatefulSets(namespace).Create(setupCtx, sts, metav1.CreateOptions{}); err != nil {
@@ -1236,7 +1214,6 @@ func (s *MultiWorkloadTypeScenario) Run(ctx context.Context, client kubernetes.I
        }
    }

    // Create DaemonSets
    for i := 0; i < numDaemonSets; i++ {
        ds := createDaemonSet(fmt.Sprintf("multi-ds-%d", i), namespace, "multi-type-cm")
        if _, err := client.AppsV1().DaemonSets(namespace).Create(setupCtx, ds, metav1.CreateOptions{}); err != nil {
@@ -1244,7 +1221,6 @@ func (s *MultiWorkloadTypeScenario) Run(ctx context.Context, client kubernetes.I
        }
    }

    // Wait for workloads to be ready
    if err := waitForDeploymentsReady(setupCtx, client, namespace, 3*time.Minute); err != nil {
        log.Printf("Warning: %v - continuing anyway", err)
    }
@@ -1286,7 +1262,6 @@ func (s *MultiWorkloadTypeScenario) Run(ctx context.Context, client kubernetes.I
}

func (s *MultiWorkloadTypeScenario) calculateExpected(updateCount, numDeployments, numStatefulSets, numDaemonSets int) ExpectedMetrics {
    // Each CM update triggers reload on all workloads
    totalWorkloads := numDeployments + numStatefulSets + numDaemonSets
    expectedReloads := updateCount * totalWorkloads
@@ -1376,7 +1351,6 @@ func createDaemonSet(name, namespace, configMapName string) *appsv1.DaemonSet {
                },
                Spec: corev1.PodSpec{
                    TerminationGracePeriodSeconds: &terminationGracePeriod,
                    // Use tolerations to run on all nodes including control-plane
                    Tolerations: []corev1.Toleration{
                        {
                            Key: "node-role.kubernetes.io/control-plane",
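Only the toleration's Key is visible in this hunk; the usual complete form for tolerating the control-plane taint (an assumption about the omitted fields, not necessarily what the file contains) is:

package example

import corev1 "k8s.io/api/core/v1"

// controlPlaneToleration lets a DaemonSet pod schedule onto control-plane
// nodes, which normally carry a NoSchedule taint.
var controlPlaneToleration = corev1.Toleration{
    Key:      "node-role.kubernetes.io/control-plane",
    Operator: corev1.TolerationOpExists,
    Effect:   corev1.TaintEffectNoSchedule,
}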
@@ -1509,7 +1483,6 @@ func (s *ComplexReferencesScenario) Run(ctx context.Context, client kubernetes.I

    setupCtx := context.Background()

    // Create ConfigMaps with multiple keys
    for i := 0; i < numConfigMaps; i++ {
        cm := &corev1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
@@ -1527,9 +1500,7 @@ func (s *ComplexReferencesScenario) Run(ctx context.Context, client kubernetes.I
        }
    }

    // Create complex deployments with various reference types
    for i := 0; i < numDeployments; i++ {
        // Each deployment references multiple ConfigMaps in different ways
        primaryCM := fmt.Sprintf("complex-cm-%d", i)
        secondaryCM := fmt.Sprintf("complex-cm-%d", (i+1)%numConfigMaps)

@@ -1560,7 +1531,6 @@ func (s *ComplexReferencesScenario) Run(ctx context.Context, client kubernetes.I
        case <-ctx.Done():
            return s.calculateExpected(updateCount, numConfigMaps, numDeployments), nil
        case <-ticker.C:
            // Update a random ConfigMap
            cmIndex := rand.Intn(numConfigMaps)
            cm, err := client.CoreV1().ConfigMaps(namespace).Get(setupCtx, fmt.Sprintf("complex-cm-%d", cmIndex), metav1.GetOptions{})
            if err != nil {
@@ -1582,11 +1552,6 @@ func (s *ComplexReferencesScenario) Run(ctx context.Context, client kubernetes.I
}

func (s *ComplexReferencesScenario) calculateExpected(updateCount, numConfigMaps, numDeployments int) ExpectedMetrics {
    // Each ConfigMap is referenced by:
    // - 1 deployment as primary (envFrom in init + valueFrom in main + volume mount)
    // - 1 deployment as secondary (projected volume)
    // So each CM update triggers 2 deployments (on average with random updates)
    // But since we're randomly updating, each update affects those 2 deployments
    expectedReloadsPerUpdate := 2
    expectedReloads := updateCount * expectedReloadsPerUpdate
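To make the factor of 2 concrete: with the wrap-around indexing shown earlier, complex-cm-i is the primary ConfigMap of deployment i and the projected secondary of deployment i-1, so one update fans out to exactly two deployments; for example, 50 random updates would predict about 100 reloads. (The update count is illustrative.)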
@@ -1616,7 +1581,6 @@ func (s *PauseResumeScenario) Run(ctx context.Context, client kubernetes.Interfa

    setupCtx := context.Background()

    // Create ConfigMaps
    for i := 0; i < numConfigMaps; i++ {
        cm := &corev1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
@@ -1630,7 +1594,6 @@ func (s *PauseResumeScenario) Run(ctx context.Context, client kubernetes.Interfa
        }
    }

    // Create Deployments with pause-period annotation
    for i := 0; i < numDeployments; i++ {
        deploy := createDeploymentWithPause(
            fmt.Sprintf("pause-deploy-%d", i),
@@ -1659,7 +1622,6 @@ func (s *PauseResumeScenario) Run(ctx context.Context, client kubernetes.Interfa
        case <-ctx.Done():
            return s.calculateExpected(updateCount, duration, updateInterval, pausePeriod), nil
        case <-ticker.C:
            // Update a random ConfigMap
            cmIndex := rand.Intn(numConfigMaps)
            cm, err := client.CoreV1().ConfigMaps(namespace).Get(setupCtx, fmt.Sprintf("pause-cm-%d", cmIndex), metav1.GetOptions{})
            if err != nil {
@@ -1679,12 +1641,6 @@ func (s *PauseResumeScenario) Run(ctx context.Context, client kubernetes.Interfa
}

func (s *PauseResumeScenario) calculateExpected(updateCount int, duration, updateInterval, pausePeriod time.Duration) ExpectedMetrics {
    // With pause-period, we expect fewer reloads than updates
    // Each deployment gets updates at random, and pause-period prevents rapid consecutive reloads
    // The exact count depends on the distribution of updates across ConfigMaps
    // Rough estimate: each CM gets updated ~(updateCount/10) times
    // With 15s pause and 2s interval, we get roughly 1 reload per pause period per CM
    // So expected reloads ≈ duration / pausePeriod per deployment = (duration/pausePeriod) * numDeployments

    // This is an approximation - the actual value depends on random distribution
    expectedCycles := int(duration / pausePeriod)
@@ -1693,8 +1649,6 @@ func (s *PauseResumeScenario) calculateExpected(updateCount int, duration, updat
    }

    return ExpectedMetrics{
        // Don't set exact expected values since pause-period makes counts unpredictable
        // The scenario validates that reloads << updates due to pause behavior
        Description: fmt.Sprintf("S12: %d updates with %v pause-period (expect ~%d reload cycles, actual reloads << updates)",
            updateCount, pausePeriod, expectedCycles),
    }
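As a worked example of the estimate above (illustrative figures only): with the 15s pause-period and 2s update interval quoted in the comment and a 2-minute update window, expectedCycles = 120s / 15s = 8, so each deployment is expected to reload on the order of 8 times even though roughly 60 ConfigMap updates are issued overall.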
@@ -1703,7 +1657,6 @@ func (s *PauseResumeScenario) calculateExpected(updateCount int, duration, updat

// AnnotationStrategyScenario - Tests annotation-based reload strategy.
// This scenario deploys its own Reloader instance with --reload-strategy=annotations.
type AnnotationStrategyScenario struct {
    // Image is the Reloader image to use. Must be set before running.
    Image string
}

@@ -1719,7 +1672,6 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.

    log.Println("S11: Deploying Reloader with --reload-strategy=annotations...")

    // Deploy S11's own Reloader instance
    reloaderNS := "reloader-s11"
    mgr := reloader.NewManager(reloader.Config{
        Version: "s11",
@@ -1732,7 +1684,6 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.
        return ExpectedMetrics{}, fmt.Errorf("deploying S11 reloader: %w", err)
    }

    // Ensure cleanup on exit
    defer func() {
        log.Println("S11: Cleaning up S11-specific Reloader...")
        cleanupCtx := context.Background()
@@ -1748,7 +1699,6 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.

    setupCtx := context.Background()

    // Create ConfigMaps
    for i := 0; i < numConfigMaps; i++ {
        cm := &corev1.ConfigMap{
            ObjectMeta: metav1.ObjectMeta{
@@ -1762,7 +1712,6 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.
        }
    }

    // Create Deployments
    for i := 0; i < numDeployments; i++ {
        deploy := createDeployment(fmt.Sprintf("annot-deploy-%d", i), namespace, fmt.Sprintf("annot-cm-%d", i))
        if _, err := client.AppsV1().Deployments(namespace).Create(setupCtx, deploy, metav1.CreateOptions{}); err != nil {
@@ -1781,13 +1730,12 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.
    ticker := time.NewTicker(500 * time.Millisecond)
    defer ticker.Stop()

-   endTime := time.Now().Add(duration - 10*time.Second) // Extra time for cleanup
+   endTime := time.Now().Add(duration - 10*time.Second)
    for time.Now().Before(endTime) {
        select {
        case <-ctx.Done():
            return s.calculateExpected(updateCount, annotationUpdatesSeen), nil
        case <-ticker.C:
            // Update a random ConfigMap
            cmIndex := rand.Intn(numConfigMaps)
            cm, err := client.CoreV1().ConfigMaps(namespace).Get(setupCtx, fmt.Sprintf("annot-cm-%d", cmIndex), metav1.GetOptions{})
            if err != nil {
@@ -1800,7 +1748,6 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.
                updateCount++
            }

            // Periodically check for annotation updates on deployments
            if updateCount%10 == 0 {
                deploy, err := client.AppsV1().Deployments(namespace).Get(setupCtx, fmt.Sprintf("annot-deploy-%d", cmIndex), metav1.GetOptions{})
                if err == nil {
@@ -1812,9 +1759,8 @@ func (s *AnnotationStrategyScenario) Run(ctx context.Context, client kubernetes.
        }
    }

    // Final check: verify annotation strategy is working
    log.Println("S11: Verifying annotation-based reload...")
-   time.Sleep(5 * time.Second) // Allow time for final updates to propagate
+   time.Sleep(5 * time.Second)

    deploysWithAnnotation := 0
    for i := 0; i < numDeployments; i++ {
@@ -1945,7 +1891,6 @@ func createComplexDeployment(name, namespace, primaryCM, secondaryCM string) *ap
                },
                Spec: corev1.PodSpec{
                    TerminationGracePeriodSeconds: &terminationGracePeriod,
                    // Init container using envFrom
                    InitContainers: []corev1.Container{
                        {
                            Name: "init",
@@ -1973,7 +1918,6 @@ func createComplexDeployment(name, namespace, primaryCM, secondaryCM string) *ap
                        },
                    },
                    Containers: []corev1.Container{
                        // Main container using valueFrom (individual keys)
                        {
                            Name: "main",
                            Image: "gcr.io/google-containers/busybox:1.27",
@@ -2013,7 +1957,6 @@ func createComplexDeployment(name, namespace, primaryCM, secondaryCM string) *ap
                            },
                        },
                        },
                        // Sidecar using volume mount
                        {
                            Name: "sidecar",
                            Image: "gcr.io/google-containers/busybox:1.27",
@@ -2041,7 +1984,6 @@ func createComplexDeployment(name, namespace, primaryCM, secondaryCM string) *ap
                        },
                    },
                    Volumes: []corev1.Volume{
                        // Regular ConfigMap volume
                        {
                            Name: "config-volume",
                            VolumeSource: corev1.VolumeSource{
@@ -2052,7 +1994,6 @@ func createComplexDeployment(name, namespace, primaryCM, secondaryCM string) *ap
                            },
                        },
                        },
                        // Projected volume combining multiple ConfigMaps
                        {
                            Name: "projected-volume",
                            VolumeSource: corev1.VolumeSource{