Updated yaml spec (#1851)

* v1beta3 spec can be read by preflight

* added test files for ease of testing

* updated v1beta3 guide doc and added tests

* fixed not removing tmp files from v1beta3 processing

* created v1beta2 to v1beta3 converter
This commit is contained in:
Noah Campbell
2025-09-16 15:43:46 -05:00
committed by GitHub
parent acc1aad843
commit d0584a4d4d
16 changed files with 2068 additions and 3 deletions

View File

@@ -0,0 +1,132 @@
package cli
import (
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/replicatedhq/troubleshoot/pkg/convert"
)
// ConvertCmd returns the "convert" subcommand, which converts a v1beta2
// preflight spec into the templated v1beta3 format, writing a templated
// spec file and a values file (paths default to the input filename with
// -v1beta3 / -values suffixes, overridable via --output-spec/--output-values).
func ConvertCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "convert [input-file]",
		Args:  cobra.ExactArgs(1),
		Short: "Convert v1beta2 preflight specs to v1beta3 format",
		Long: `Convert v1beta2 preflight specs to v1beta3 format with templating and values.
This command converts a v1beta2 preflight spec to the new v1beta3 templated format. It will:
- Update the apiVersion to troubleshoot.sh/v1beta3
- Extract hardcoded values and create a values.yaml file
- Add conditional templating ({{- if .Values.feature.enabled }})
- Add placeholder docString comments for you to fill in
- Template hardcoded values with {{ .Values.* }} expressions
The conversion will create two files:
- [input-file]-v1beta3.yaml: The templated v1beta3 spec
- [input-file]-values.yaml: The values file with extracted configuration
Example:
preflight convert my-preflight.yaml
This creates:
my-preflight-v1beta3.yaml
my-preflight-values.yaml`,
		PreRun: func(cmd *cobra.Command, args []string) {
			viper.BindPFlags(cmd.Flags())
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			v := viper.GetViper()
			inputFile := args[0]
			outputSpec := v.GetString("output-spec")
			outputValues := v.GetString("output-values")
			// Derive default output filenames from the input filename once
			// (previously this ext/base computation was duplicated per flag),
			// e.g. foo.yaml -> foo-v1beta3.yaml / foo-values.yaml.
			ext := filepath.Ext(inputFile)
			base := strings.TrimSuffix(inputFile, ext)
			if outputSpec == "" {
				outputSpec = base + "-v1beta3" + ext
			}
			if outputValues == "" {
				outputValues = base + "-values" + ext
			}
			return runConvert(v, inputFile, outputSpec, outputValues)
		},
	}
	cmd.Flags().String("output-spec", "", "Output file for the templated v1beta3 spec (default: [input]-v1beta3.yaml)")
	cmd.Flags().String("output-values", "", "Output file for the values (default: [input]-values.yaml)")
	cmd.Flags().Bool("dry-run", false, "Preview the conversion without writing files")
	return cmd
}
// runConvert performs the v1beta2 -> v1beta3 conversion for inputFile,
// either previewing the result (when --dry-run is set) or writing the
// templated spec and values files to outputSpec / outputValues.
func runConvert(v *viper.Viper, inputFile, outputSpec, outputValues string) error {
	inputData, err := ioutil.ReadFile(inputFile)
	if err != nil {
		return errors.Wrapf(err, "failed to read input file %s", inputFile)
	}
	// Cheap textual sanity checks before attempting a full conversion.
	content := string(inputData)
	if !strings.Contains(content, "troubleshoot.sh/v1beta2") {
		return fmt.Errorf("input file does not appear to be a v1beta2 troubleshoot spec")
	}
	if !strings.Contains(content, "kind: Preflight") {
		return fmt.Errorf("input file does not appear to be a Preflight spec")
	}
	result, err := convert.ConvertToV1Beta3(inputData)
	if err != nil {
		return errors.Wrap(err, "failed to convert spec")
	}
	if v.GetBool("dry-run") {
		// Preview mode: print both artifacts instead of writing files.
		fmt.Println("=== Templated v1beta3 Spec ===")
		fmt.Println(result.TemplatedSpec)
		fmt.Println("\n=== Values File ===")
		fmt.Println(result.ValuesFile)
		fmt.Println("\n=== Conversion Summary ===")
		fmt.Printf("Would write templated spec to: %s\n", outputSpec)
		fmt.Printf("Would write values to: %s\n", outputValues)
		return nil
	}
	if err := ioutil.WriteFile(outputSpec, []byte(result.TemplatedSpec), 0644); err != nil {
		return errors.Wrapf(err, "failed to write templated spec to %s", outputSpec)
	}
	if err := ioutil.WriteFile(outputValues, []byte(result.ValuesFile), 0644); err != nil {
		return errors.Wrapf(err, "failed to write values to %s", outputValues)
	}
	fmt.Printf("Successfully converted %s to v1beta3 format:\n", inputFile)
	fmt.Printf(" Templated spec: %s\n", outputSpec)
	fmt.Printf(" Values file: %s\n", outputValues)
	fmt.Println("\nNext steps:")
	fmt.Println("1. Add docStrings with Title, Requirement, and rationale for each check")
	fmt.Println("2. Customize the values in the values file")
	fmt.Println("3. Test the conversion with:")
	fmt.Printf(" preflight template %s --values %s\n", outputSpec, outputValues)
	fmt.Println("4. Run the templated preflight:")
	fmt.Printf(" preflight run %s --values %s\n", outputSpec, outputValues)
	return nil
}

View File

@@ -88,6 +88,8 @@ that a cluster meets the requirements to run an application.`,
cmd.AddCommand(OciFetchCmd())
cmd.AddCommand(TemplateCmd())
cmd.AddCommand(DocsCmd())
cmd.AddCommand(ConvertCmd())
preflight.AddFlags(cmd.PersistentFlags())
// Dry run flag should be in cmd.PersistentFlags() flags made available to all subcommands
@@ -96,6 +98,10 @@ that a cluster meets the requirements to run an application.`,
cmd.Flags().Bool("no-uri", false, "When this flag is used, Preflight does not attempt to retrieve the spec referenced by the uri: field`")
cmd.Flags().Bool("auto-update", true, "enable automatic binary self-update check and install")
// Template values for v1beta3 specs
cmd.Flags().StringSlice("values", []string{}, "Path to YAML files containing template values for v1beta3 specs (can be used multiple times)")
cmd.Flags().StringSlice("set", []string{}, "Set template values on the command line for v1beta3 specs (can be used multiple times)")
k8sutil.AddFlags(cmd.Flags())
// Initialize klog flags

374
docs/v1beta3-guide.md Normal file
View File

@@ -0,0 +1,374 @@
## Writing modular, templated Preflight specs (v1beta3 style)
This guide shows how to author preflight YAML specs in a modular, values-driven style like `v1beta3.yaml`. The goal is to keep checks self-documenting, easy to toggle on/off, and customizable via values files or inline `--set` flags.
### Core structure
- **Header**
- `apiVersion`: `troubleshoot.sh/v1beta3`
- `kind`: `Preflight`
- `metadata.name`: a short, stable identifier
- **Spec**
- `spec.analyzers`: list of checks (analyzers)
- Each analyzer is optionally guarded by templating conditionals (e.g., `{{- if .Values.kubernetes.enabled }}`)
- A `docString` accompanies each analyzer, describing the requirement, why it matters, and any links
### Use templating and values
The examples use Go templates with the standard Sprig function set. Values can be supplied by files (`--values`) and/or inline overrides (`--set`), and accessed in templates via `.Values`.
- **Toggling sections**: wrap analyzer blocks in conditionals tied to values.
```yaml
{{- if .Values.storage.enabled }}
- docString: |
Title: Default StorageClass Requirements
Requirement:
- A StorageClass named "{{ .Values.storage.className }}" must exist
...
storageClass:
checkName: Default StorageClass
storageClassName: '{{ .Values.storage.className }}'
outcomes:
- fail:
message: Default StorageClass not found
- pass:
message: Default StorageClass present
{{- end }}
```
- **Values**: template expressions directly use values from your values files.
```yaml
{{ .Values.kubernetes.minVersion }}
```
- **Nested conditionals**: further constrain checks (e.g., only when a specific ingress type is used).
```yaml
{{- if .Values.ingress.enabled }}
{{- if eq .Values.ingress.type "Contour" }}
- docString: |
Title: Required CRDs and Ingress Capabilities
...
customResourceDefinition:
checkName: Contour IngressRoute CRD
customResourceDefinitionName: ingressroutes.contour.heptio.com
outcomes:
- fail:
message: Contour IngressRoute CRD not found; required for ingress routing
- pass:
message: Contour IngressRoute CRD present
{{- end }}
{{- end }}
```
### Author high-quality docString blocks
Every analyzer should start with a `docString` so you can extract documentation automatically:
- **Title**: a concise name for the requirement
- **Requirement**: bullet list of specific, testable criteria (e.g., versions, counts, names)
- **Rationale**: 1–3 sentences explaining why the requirement exists and the impact if unmet
- **Links**: include authoritative docs with stable URLs
Example:
```yaml
docString: |
Title: Required CRDs and Ingress Capabilities
Requirement:
- Ingress Controller: Contour
- CRD must be present:
- Group: heptio.com
- Kind: IngressRoute
- Version: v1beta1 or later served version
The ingress layer terminates TLS and routes external traffic to Services.
Contour relies on the IngressRoute CRD to express host/path routing, TLS
configuration, and policy. If the CRD is not installed and served by the
API server, Contour cannot reconcile desired state, leaving routes
unconfigured and traffic unreachable.
```
### Choose the right analyzer type and outcomes
Use the analyzer that matches the requirement, and enumerate `outcomes` with clear messages. Common analyzers in this style:
- **clusterVersion**: compare to min and recommended versions
```yaml
clusterVersion:
checkName: Kubernetes version
outcomes:
- fail:
when: '< {{ .Values.kubernetes.minVersion }}'
message: This application requires at least Kubernetes {{ .Values.kubernetes.minVersion }}.
- warn:
when: '< {{ .Values.kubernetes.recommendedVersion }}'
message: Recommended version is {{ .Values.kubernetes.recommendedVersion }} or later.
- pass:
when: '>= {{ .Values.kubernetes.recommendedVersion }}'
message: Your cluster meets the recommended and required versions of Kubernetes.
```
- **customResourceDefinition**: ensure a CRD exists
```yaml
customResourceDefinition:
checkName: Contour IngressRoute CRD
customResourceDefinitionName: ingressroutes.contour.heptio.com
outcomes:
- fail:
message: Contour IngressRoute CRD not found; required for ingress routing
- pass:
message: Contour IngressRoute CRD present
```
- **containerRuntime**: verify container runtime
```yaml
containerRuntime:
outcomes:
- pass:
when: '== containerd'
message: containerd runtime detected
- fail:
message: Unsupported container runtime; containerd required
```
- **storageClass**: check for a named StorageClass (often the default)
```yaml
storageClass:
checkName: Default StorageClass
storageClassName: '{{ .Values.storage.className }}'
outcomes:
- fail:
message: Default StorageClass not found
- pass:
message: Default StorageClass present
```
- **distribution**: whitelist/blacklist distributions
```yaml
distribution:
outcomes:
- fail:
when: '== docker-desktop'
message: The application does not support Docker Desktop Clusters
- pass:
when: '== eks'
message: EKS is a supported distribution
- warn:
message: Unable to determine the distribution of Kubernetes
```
- **nodeResources**: aggregate across nodes; common patterns include count, CPU, memory, and ephemeral storage
```yaml
# Node count requirement
nodeResources:
checkName: Node count
outcomes:
- fail:
when: 'count() < {{ .Values.cluster.minNodes }}'
message: This application requires at least {{ .Values.cluster.minNodes }} nodes.
- warn:
when: 'count() < {{ .Values.cluster.recommendedNodes }}'
message: This application recommends at least {{ .Values.cluster.recommendedNodes }} nodes.
- pass:
message: This cluster has enough nodes.
# Cluster CPU total
nodeResources:
checkName: Cluster CPU total
outcomes:
- fail:
when: 'sum(cpuCapacity) < {{ .Values.cluster.minCPU }}'
message: The cluster must contain at least {{ .Values.cluster.minCPU }} cores
- pass:
message: There are at least {{ .Values.cluster.minCPU }} cores in the cluster
# Per-node memory (Gi)
nodeResources:
checkName: Per-node memory requirement
outcomes:
- fail:
when: 'min(memoryCapacity) < {{ .Values.node.minMemoryGi }}Gi'
message: All nodes must have at least {{ .Values.node.minMemoryGi }} GiB of memory.
- warn:
when: 'min(memoryCapacity) < {{ .Values.node.recommendedMemoryGi }}Gi'
message: All nodes are recommended to have at least {{ .Values.node.recommendedMemoryGi }} GiB of memory.
- pass:
message: All nodes have at least {{ .Values.node.recommendedMemoryGi }} GiB of memory.
# Per-node ephemeral storage (Gi)
nodeResources:
checkName: Per-node ephemeral storage requirement
outcomes:
- fail:
when: 'min(ephemeralStorageCapacity) < {{ .Values.node.minEphemeralGi }}Gi'
message: All nodes must have at least {{ .Values.node.minEphemeralGi }} GiB of ephemeral storage.
- warn:
when: 'min(ephemeralStorageCapacity) < {{ .Values.node.recommendedEphemeralGi }}Gi'
message: All nodes are recommended to have at least {{ .Values.node.recommendedEphemeralGi }} GiB of ephemeral storage.
- pass:
message: All nodes have at least {{ .Values.node.recommendedEphemeralGi }} GiB of ephemeral storage.
```
### Design conventions for maintainability
- **Guard every optional analyzer** with a values toggle, so consumers can enable only what they need.
- **Use `checkName`** to provide a stable, user-facing label for each check.
- **Prefer `fail` for unmet hard requirements**, `warn` for soft requirements, and `pass` with a direct, affirmative message.
- **Attach `uri`** to outcomes when helpful for remediation.
- **Keep docString in sync** with the actual checks; avoid drift by templating values into both the docs and the analyzer.
- **Ensure values files contain all required fields** since templates now directly use values without fallback defaults.
### Values files: shape and examples
Provide a values schema that mirrors your toggles and thresholds. Example full and minimal values are included in this repository:
- `values-v1beta3-full.yaml` (all features enabled, opinionated defaults)
- `values-v1beta3-minimal.yaml` (most features disabled, conservative thresholds)
Typical structure:
```yaml
kubernetes:
enabled: false
minVersion: "1.22.0"
recommendedVersion: "1.29.0"
storage:
enabled: true
className: "default"
cluster:
minNodes: 3
recommendedNodes: 5
minCPU: 4
node:
minMemoryGi: 8
recommendedMemoryGi: 32
minEphemeralGi: 40
recommendedEphemeralGi: 100
ingress:
enabled: true
type: "Contour"
runtime:
enabled: true
distribution:
enabled: true
nodeChecks:
enabled: true
count:
enabled: true
cpu:
enabled: true
memory:
enabled: true
ephemeral:
enabled: true
```
### Render, run, and extract docs
You can render templates, run preflights with values, and extract requirement docs without running checks.
- **Render a templated preflight spec** to stdout or a file:
```bash
preflight template v1beta3.yaml \
--values values-base.yaml \
--values values-prod.yaml \
--set storage.className=fast-local \
-o rendered-preflight.yaml
```
- **Run preflights with values** (values and sets also work with `preflight` root command):
```bash
preflight run rendered-preflight.yaml
# or run directly against the template with values
preflight run v1beta3.yaml --values values-prod.yaml --set cluster.minNodes=5
```
- **Extract only documentation** from enabled analyzers in one or more templates:
```bash
preflight docs v1beta3.yaml other-spec.yaml \
--values values-prod.yaml \
--set kubernetes.enabled=true \
-o REQUIREMENTS.md
```
Notes:
- Multiple `--values` files are merged in order; later files win.
- `--set` uses Helm-style semantics for nested keys and types, applied after files.
### Authoring checklist
- Add `docString` with Title, Requirement bullets, rationale, and links.
- Gate optional analyzers with `{{- if .Values.<feature>.enabled }}`.
- Parameterize thresholds and names with `.Values` expressions.
- Ensure all required values are present in your values files since there are no fallback defaults.
- Use precise, user-actionable `message` text for each outcome; add `uri` where helpful.
- Prefer a minimal values file with everything disabled, and a full values file enabling most checks.
- Test with `preflight template` (no values, minimal, full) and verify `preflight docs` output reads well.
### Example skeleton to start a new spec
```yaml
apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: your-product-preflight
spec:
analyzers:
{{- if .Values.kubernetes.enabled }}
- docString: |
Title: Kubernetes Control Plane Requirements
Requirement:
- Version:
- Minimum: {{ .Values.kubernetes.minVersion }}
- Recommended: {{ .Values.kubernetes.recommendedVersion }}
Running below minimum may remove GA APIs and critical fixes.
clusterVersion:
checkName: Kubernetes version
outcomes:
- fail:
when: '< {{ .Values.kubernetes.minVersion }}'
message: Requires Kubernetes >= {{ .Values.kubernetes.minVersion }}.
- warn:
when: '< {{ .Values.kubernetes.recommendedVersion }}'
message: Recommended {{ .Values.kubernetes.recommendedVersion }} or later.
- pass:
when: '>= {{ .Values.kubernetes.recommendedVersion }}'
message: Meets recommended and required versions.
{{- end }}
{{- if .Values.storage.enabled }}
- docString: |
Title: Default StorageClass Requirements
Requirement:
- A StorageClass named "{{ .Values.storage.className }}" must exist
storageClass:
checkName: Default StorageClass
storageClassName: '{{ .Values.storage.className }}'
outcomes:
- fail:
message: Default StorageClass not found
- pass:
message: Default StorageClass present
{{- end }}
```
### References
- Example template in this repo: `v1beta3.yaml`
- Values examples: `values-v1beta3-full.yaml`, `values-v1beta3-minimal.yaml`

View File

@@ -80,6 +80,7 @@ const (
PreflightKey2 = "preflight-spec"
// Troubleshoot spec constants
Troubleshootv1beta3Kind = "troubleshoot.sh/v1beta3"
Troubleshootv1beta2Kind = "troubleshoot.sh/v1beta2"
Troubleshootv1beta1Kind = "troubleshoot.replicated.com/v1beta1"

633
pkg/convert/v1beta3.go Normal file
View File

@@ -0,0 +1,633 @@
package convert
import (
"bytes"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
// V1Beta2ToV1Beta3Result holds the conversion results
type V1Beta2ToV1Beta3Result struct {
	// TemplatedSpec is the rendered v1beta3 spec containing Go-template
	// conditionals and {{ .Values.* }} expressions.
	TemplatedSpec string `yaml:"-"`
	// ValuesFile is the YAML-serialized form of Values.
	ValuesFile string `yaml:"-"`
	// Values is the extracted configuration that backs ValuesFile.
	Values map[string]interface{} `yaml:"-"`
}
// ConvertToV1Beta3 converts a v1beta2 preflight spec to v1beta3 format with
// templating. It rejects documents that are already v1beta3, any other
// apiVersion, and any kind other than Preflight.
func ConvertToV1Beta3(doc []byte) (*V1Beta2ToV1Beta3Result, error) {
	var parsed map[string]interface{}
	if err := yaml.Unmarshal(doc, &parsed); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal yaml")
	}
	// Only v1beta2 Preflight documents are convertible.
	switch apiVersion := parsed["apiVersion"]; apiVersion {
	case "troubleshoot.sh/v1beta3":
		return nil, errors.New("document is already v1beta3")
	case "troubleshoot.sh/v1beta2":
		// supported input version
	default:
		return nil, errors.Errorf("unsupported apiVersion: %v", apiVersion)
	}
	if kind := parsed["kind"]; kind != "Preflight" {
		return nil, errors.Errorf("unsupported kind: %v", kind)
	}
	// Walk the spec, extracting values and rendering the templated output.
	extracted := make(map[string]interface{})
	conv := &v1beta3Converter{
		values: extracted,
		spec:   parsed,
	}
	templatedSpec, err := conv.convert()
	if err != nil {
		return nil, errors.Wrap(err, "failed to convert spec")
	}
	valuesBytes, err := yaml.Marshal(extracted)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal values")
	}
	return &V1Beta2ToV1Beta3Result{
		TemplatedSpec: templatedSpec,
		ValuesFile:    string(valuesBytes),
		Values:        extracted,
	}, nil
}
// v1beta3Converter accumulates extracted values while walking a parsed
// v1beta2 spec and rendering its templated v1beta3 replacement.
type v1beta3Converter struct {
	// values collects the extracted configuration that becomes the values file.
	values map[string]interface{}
	// spec is the parsed v1beta2 document being converted.
	spec map[string]interface{}
}
// convert renders the templated v1beta3 spec from the parsed v1beta2
// document, populating c.values with extracted configuration as a side
// effect of converting each analyzer.
func (c *v1beta3Converter) convert() (string, error) {
	// Seed default values so the generated values file is complete even
	// when the source spec exercises only a few analyzer types.
	c.initializeValues()
	// Carry the original metadata.name across when present.
	metadataName := "converted-from-v1beta2"
	if metadata, ok := c.spec["metadata"].(map[interface{}]interface{}); ok {
		if name, ok := metadata["name"].(string); ok {
			metadataName = name
		}
	}
	// Process spec
	var analyzers []interface{}
	if spec, ok := c.spec["spec"].(map[interface{}]interface{}); ok {
		if analyzersList, ok := spec["analyzers"].([]interface{}); ok {
			convertedAnalyzers, err := c.convertAnalyzers(analyzersList)
			if err != nil {
				return "", errors.Wrap(err, "failed to convert analyzers")
			}
			analyzers = convertedAnalyzers
		}
	}
	// Build the templated spec string
	var buf bytes.Buffer
	buf.WriteString("apiVersion: troubleshoot.sh/v1beta3\n")
	buf.WriteString("kind: Preflight\n")
	buf.WriteString("metadata:\n")
	buf.WriteString(fmt.Sprintf("  name: %s\n", metadataName))
	buf.WriteString("spec:\n")
	buf.WriteString("  analyzers:\n")
	for _, analyzer := range analyzers {
		if analyzerStr, ok := analyzer.(string); ok {
			// Already a templated string; indent it under "analyzers:".
			buf.WriteString("  ")
			buf.WriteString(strings.ReplaceAll(analyzerStr, "\n", "\n  "))
			buf.WriteString("\n")
		} else {
			// Marshal the analyzer map and emit it as ONE YAML list item.
			// BUG FIX: previously every non-empty line was prefixed with
			// "- ", turning a single multi-line analyzer into many invalid
			// list items. Only the first line takes the dash; continuation
			// lines are indented to align beneath it.
			analyzerBytes, err := yaml.Marshal(analyzer)
			if err != nil {
				return "", errors.Wrap(err, "failed to marshal analyzer")
			}
			first := true
			for _, line := range strings.Split(string(analyzerBytes), "\n") {
				if strings.TrimSpace(line) == "" {
					continue
				}
				if first {
					buf.WriteString("  - ")
					first = false
				} else {
					buf.WriteString("    ")
				}
				buf.WriteString(line)
				buf.WriteString("\n")
			}
		}
	}
	return buf.String(), nil
}
// initializeValues seeds c.values with the full default schema used by the
// generated values file; the per-analyzer converters flip the relevant
// "enabled" flags and overwrite thresholds as matching analyzers are found.
func (c *v1beta3Converter) initializeValues() {
	defaults := map[string]interface{}{
		"kubernetes": map[string]interface{}{
			"enabled":            false,
			"minVersion":         "1.20.0",
			"recommendedVersion": "1.22.0",
		},
		"storage": map[string]interface{}{
			"enabled":   false,
			"className": "default",
		},
		"cluster": map[string]interface{}{
			"minNodes":         3,
			"recommendedNodes": 5,
			"minCPU":           4,
		},
		"node": map[string]interface{}{
			"minMemoryGi":            8,
			"recommendedMemoryGi":    32,
			"minEphemeralGi":         40,
			"recommendedEphemeralGi": 100,
		},
		"ingress": map[string]interface{}{
			"enabled": false,
			"type":    "Contour",
		},
		"runtime":      map[string]interface{}{"enabled": false},
		"distribution": map[string]interface{}{"enabled": false},
		"nodeChecks": map[string]interface{}{
			"enabled":   false,
			"count":     map[string]interface{}{"enabled": false},
			"cpu":       map[string]interface{}{"enabled": false},
			"memory":    map[string]interface{}{"enabled": false},
			"ephemeral": map[string]interface{}{"enabled": false},
		},
	}
	for key, val := range defaults {
		c.values[key] = val
	}
}
// convertAnalyzers converts each analyzer entry in order, skipping entries
// that are not maps and omitting nil conversion results.
func (c *v1beta3Converter) convertAnalyzers(analyzers []interface{}) ([]interface{}, error) {
	var out []interface{}
	for _, raw := range analyzers {
		entry, ok := raw.(map[interface{}]interface{})
		if !ok {
			continue
		}
		converted, err := c.convertAnalyzer(entry)
		if err != nil {
			return nil, err
		}
		if converted == nil {
			continue
		}
		out = append(out, converted)
	}
	return out, nil
}
// convertAnalyzer dispatches on the analyzer's type key (checked in a fixed
// order); unrecognized analyzers are passed through with a manual-review
// warning docString.
func (c *v1beta3Converter) convertAnalyzer(analyzer map[interface{}]interface{}) (interface{}, error) {
	dispatch := []struct {
		key string
		fn  func(map[interface{}]interface{}) (interface{}, error)
	}{
		{"clusterVersion", c.convertClusterVersion},
		{"customResourceDefinition", c.convertCustomResourceDefinition},
		{"containerRuntime", c.convertContainerRuntime},
		{"storageClass", c.convertStorageClass},
		{"distribution", c.convertDistribution},
		{"nodeResources", c.convertNodeResources},
	}
	for _, d := range dispatch {
		if _, exists := analyzer[d.key]; exists {
			return d.fn(analyzer)
		}
	}
	return c.wrapWithWarning(analyzer, "Unknown analyzer type - manual review required")
}
// convertClusterVersion enables the kubernetes value block and mines the
// analyzer's outcome conditions for min/recommended version thresholds.
func (c *v1beta3Converter) convertClusterVersion(analyzer map[interface{}]interface{}) (interface{}, error) {
	c.setNestedValue("kubernetes.enabled", true)
	if cv, ok := analyzer["clusterVersion"].(map[interface{}]interface{}); ok {
		outcomes, ok := cv["outcomes"].([]interface{})
		if ok {
			c.extractVersionRequirements(outcomes)
		}
	}
	return c.createTemplatedAnalyzer("kubernetes", analyzer, "")
}
// convertCustomResourceDefinition enables the ingress value block; a CRD
// name containing "contour" additionally marks the ingress type as Contour.
func (c *v1beta3Converter) convertCustomResourceDefinition(analyzer map[interface{}]interface{}) (interface{}, error) {
	c.setNestedValue("ingress.enabled", true)
	if crd, ok := analyzer["customResourceDefinition"].(map[interface{}]interface{}); ok {
		crdName, _ := crd["customResourceDefinitionName"].(string)
		if strings.Contains(crdName, "contour") {
			c.setNestedValue("ingress.type", "Contour")
		}
	}
	return c.createTemplatedAnalyzer("ingress", analyzer, "")
}
// convertContainerRuntime enables the runtime value block and wraps the
// analyzer in the runtime.enabled conditional.
func (c *v1beta3Converter) convertContainerRuntime(analyzer map[interface{}]interface{}) (interface{}, error) {
	c.setNestedValue("runtime.enabled", true)
	return c.createTemplatedAnalyzer("runtime", analyzer, "")
}
// convertStorageClass enables the storage value block, records the spec's
// hardcoded storageClassName as the default value, and rewrites the
// analyzer to read the name from {{ .Values.storage.className }}.
func (c *v1beta3Converter) convertStorageClass(analyzer map[interface{}]interface{}) (interface{}, error) {
	c.setNestedValue("storage.enabled", true)
	// One type assertion (previously performed twice on the same entry):
	// capture the hardcoded class name, then templatize it in place.
	if sc, ok := analyzer["storageClass"].(map[interface{}]interface{}); ok {
		if className, ok := sc["storageClassName"].(string); ok {
			c.setNestedValue("storage.className", className)
		}
		sc["storageClassName"] = "{{ .Values.storage.className }}"
	}
	return c.createTemplatedAnalyzer("storage", analyzer, "")
}
// convertDistribution enables the distribution value block and wraps the
// analyzer in the distribution.enabled conditional.
func (c *v1beta3Converter) convertDistribution(analyzer map[interface{}]interface{}) (interface{}, error) {
	c.setNestedValue("distribution.enabled", true)
	return c.createTemplatedAnalyzer("distribution", analyzer, "")
}
// convertNodeResources classifies a nodeResources analyzer by its checkName
// (node count, cpu, memory, ephemeral storage — tested in that order) and
// enables/extracts the matching values; anything unclassified falls back to
// the generic nodeChecks toggle.
func (c *v1beta3Converter) convertNodeResources(analyzer map[interface{}]interface{}) (interface{}, error) {
	if nr, ok := analyzer["nodeResources"].(map[interface{}]interface{}); ok {
		name, _ := nr["checkName"].(string)
		checkName := strings.ToLower(name)
		has := func(sub string) bool { return strings.Contains(checkName, sub) }
		switch {
		case has("node") && has("count"):
			c.setNestedValue("nodeChecks.enabled", true)
			c.setNestedValue("nodeChecks.count.enabled", true)
			c.extractNodeCountRequirements(nr)
			return c.createTemplatedAnalyzer("nodeChecks.count", analyzer, "")
		case has("cpu") || has("core"):
			c.setNestedValue("nodeChecks.enabled", true)
			c.setNestedValue("nodeChecks.cpu.enabled", true)
			c.extractCPURequirements(nr)
			return c.createTemplatedAnalyzer("nodeChecks.cpu", analyzer, "")
		case has("memory"):
			c.setNestedValue("nodeChecks.enabled", true)
			c.setNestedValue("nodeChecks.memory.enabled", true)
			c.extractMemoryRequirements(nr)
			c.templatizeMemoryOutcomes(analyzer)
			return c.createTemplatedAnalyzer("nodeChecks.memory", analyzer, "")
		case has("ephemeral") || has("storage"):
			c.setNestedValue("nodeChecks.enabled", true)
			c.setNestedValue("nodeChecks.ephemeral.enabled", true)
			c.extractEphemeralRequirements(nr)
			c.templatizeEphemeralOutcomes(analyzer)
			return c.createTemplatedAnalyzer("nodeChecks.ephemeral", analyzer, "")
		}
	}
	// Default case - enable general node checks
	c.setNestedValue("nodeChecks.enabled", true)
	return c.createTemplatedAnalyzer("nodeChecks", analyzer, "")
}
// createTemplatedAnalyzer renders the analyzer as a YAML list item wrapped
// in a {{- if .Values.<checkType>.enabled }} conditional. When docString is
// empty a TODO placeholder is emitted for the author to fill in.
func (c *v1beta3Converter) createTemplatedAnalyzer(checkType string, originalAnalyzer map[interface{}]interface{}, docString string) (interface{}, error) {
	// Convert map[interface{}]interface{} to map[string]interface{} so keys
	// marshal as plain YAML strings.
	convertedAnalyzer := c.convertMapKeys(originalAnalyzer)
	// FIX: the docString parameter was previously accepted but ignored.
	// Honor it when supplied; all current callers pass "" and so still get
	// the placeholder, keeping behavior backward compatible.
	if docString == "" {
		docString = "# TODO: Add docString with Title, Requirement, and rationale for this check"
	}
	convertedAnalyzer["docString"] = docString
	// Marshal the analyzer to YAML
	analyzerBytes, err := yaml.Marshal(convertedAnalyzer)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal analyzer")
	}
	analyzerYAML := strings.TrimSuffix(string(analyzerBytes), "\n")
	// Wrap in a values-driven conditional so consumers can toggle the check.
	condition := fmt.Sprintf("{{- if .Values.%s.enabled }}", checkType)
	endCondition := "{{- end }}"
	templateStr := fmt.Sprintf("%s\n- %s\n%s", condition,
		strings.ReplaceAll(analyzerYAML, "\n", "\n  "),
		endCondition)
	return templateStr, nil
}
// wrapWithWarning passes an unrecognized analyzer through (with string
// keys), tagging it with a docString that flags it for manual review.
func (c *v1beta3Converter) wrapWithWarning(analyzer map[interface{}]interface{}, warning string) (interface{}, error) {
	out := c.convertMapKeys(analyzer)
	out["docString"] = fmt.Sprintf("# TODO: Manual Review Required - %s", warning)
	return out, nil
}
// convertMapKeys deep-converts a yaml.v2-style map (interface{} keys) into
// a map with string keys so it marshals as an ordinary YAML mapping.
func (c *v1beta3Converter) convertMapKeys(m map[interface{}]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(m))
	for key, value := range m {
		name := fmt.Sprintf("%v", key)
		if nested, ok := value.(map[interface{}]interface{}); ok {
			out[name] = c.convertMapKeys(nested)
			continue
		}
		if list, ok := value.([]interface{}); ok {
			out[name] = c.convertSlice(list)
			continue
		}
		out[name] = value
	}
	return out
}
// convertSlice deep-converts slice elements, stringifying the keys of any
// nested maps via convertMapKeys.
func (c *v1beta3Converter) convertSlice(s []interface{}) []interface{} {
	out := make([]interface{}, len(s))
	for i, value := range s {
		if nested, ok := value.(map[interface{}]interface{}); ok {
			out[i] = c.convertMapKeys(nested)
			continue
		}
		if list, ok := value.([]interface{}); ok {
			out[i] = c.convertSlice(list)
			continue
		}
		out[i] = value
	}
	return out
}
// Helper methods for extracting requirements from outcomes

// extractVersionRequirements mines fail (minimum) and warn (recommended)
// outcome conditions for the kubernetes version thresholds.
func (c *v1beta3Converter) extractVersionRequirements(outcomes []interface{}) {
	record := func(outcome map[interface{}]interface{}, key, target string) {
		entry, ok := outcome[key].(map[interface{}]interface{})
		if !ok {
			return
		}
		when, ok := entry["when"].(string)
		if !ok {
			return
		}
		if version := c.extractVersionFromWhen(when); version != "" {
			c.setNestedValue(target, version)
		}
	}
	for _, outcome := range outcomes {
		if outcomeMap, ok := outcome.(map[interface{}]interface{}); ok {
			record(outcomeMap, "fail", "kubernetes.minVersion")
			record(outcomeMap, "warn", "kubernetes.recommendedVersion")
		}
	}
}
// extractVersionFromWhen pulls the version literal out of a "less than"
// condition such as `< 1.22.0` or `<= 1.22.0`. Returns "" when the
// condition is not a less-than comparison.
func (c *v1beta3Converter) extractVersionFromWhen(when string) string {
	when = strings.TrimSpace(when)
	if !strings.HasPrefix(when, "<") {
		return ""
	}
	// Strip "<=" as well as "<"; previously a "<= X" condition left a
	// stray "=" at the front of the extracted version string.
	version := strings.TrimPrefix(when, "<")
	version = strings.TrimPrefix(version, "=")
	version = strings.TrimSpace(version)
	return strings.Trim(version, `"`)
}
// extractNodeCountRequirements mines node-count thresholds from fail
// (minimum) and warn (recommended) outcome conditions on count().
func (c *v1beta3Converter) extractNodeCountRequirements(nr map[interface{}]interface{}) {
	outcomes, ok := nr["outcomes"].([]interface{})
	if !ok {
		return
	}
	record := func(outcome map[interface{}]interface{}, key, target string) {
		entry, ok := outcome[key].(map[interface{}]interface{})
		if !ok {
			return
		}
		when, ok := entry["when"].(string)
		if !ok {
			return
		}
		if count := c.extractNumberFromWhen(when, "count()"); count > 0 {
			c.setNestedValue(target, count)
		}
	}
	for _, outcome := range outcomes {
		if outcomeMap, ok := outcome.(map[interface{}]interface{}); ok {
			record(outcomeMap, "fail", "cluster.minNodes")
			record(outcomeMap, "warn", "cluster.recommendedNodes")
		}
	}
}
// extractCPURequirements mines the minimum cluster-wide CPU count from
// fail outcome conditions on sum(cpuCapacity).
func (c *v1beta3Converter) extractCPURequirements(nr map[interface{}]interface{}) {
	outcomes, ok := nr["outcomes"].([]interface{})
	if !ok {
		return
	}
	for _, outcome := range outcomes {
		outcomeMap, ok := outcome.(map[interface{}]interface{})
		if !ok {
			continue
		}
		fail, ok := outcomeMap["fail"].(map[interface{}]interface{})
		if !ok {
			continue
		}
		when, ok := fail["when"].(string)
		if !ok {
			continue
		}
		if cpu := c.extractNumberFromWhen(when, "sum(cpuCapacity)"); cpu > 0 {
			c.setNestedValue("cluster.minCPU", cpu)
		}
	}
}
// extractMemoryRequirements mines per-node memory thresholds (GiB) from
// fail (minimum) and warn (recommended) outcome conditions.
func (c *v1beta3Converter) extractMemoryRequirements(nr map[interface{}]interface{}) {
	outcomes, ok := nr["outcomes"].([]interface{})
	if !ok {
		return
	}
	record := func(outcome map[interface{}]interface{}, key, target string) {
		entry, ok := outcome[key].(map[interface{}]interface{})
		if !ok {
			return
		}
		when, ok := entry["when"].(string)
		if !ok {
			return
		}
		if memory := c.extractMemoryFromWhen(when); memory > 0 {
			c.setNestedValue(target, memory)
		}
	}
	for _, outcome := range outcomes {
		if outcomeMap, ok := outcome.(map[interface{}]interface{}); ok {
			record(outcomeMap, "fail", "node.minMemoryGi")
			record(outcomeMap, "warn", "node.recommendedMemoryGi")
		}
	}
}
// extractEphemeralRequirements records per-node ephemeral-storage thresholds
// from a nodeResources analyzer: a fail condition like
// "min(ephemeralStorageCapacity) < 40Gi" becomes node.minEphemeralGi, and the
// warn equivalent becomes node.recommendedEphemeralGi.
func (c *v1beta3Converter) extractEphemeralRequirements(nr map[interface{}]interface{}) {
	outcomes, ok := nr["outcomes"].([]interface{})
	if !ok {
		return
	}
	for _, raw := range outcomes {
		outcome, ok := raw.(map[interface{}]interface{})
		if !ok {
			continue
		}
		if fail, ok := outcome["fail"].(map[interface{}]interface{}); ok {
			if when, ok := fail["when"].(string); ok {
				if gi := c.extractStorageFromWhen(when); gi > 0 {
					c.setNestedValue("node.minEphemeralGi", gi)
				}
			}
		}
		if warn, ok := outcome["warn"].(map[interface{}]interface{}); ok {
			if when, ok := warn["when"].(string); ok {
				if gi := c.extractStorageFromWhen(when); gi > 0 {
					c.setNestedValue("node.recommendedEphemeralGi", gi)
				}
			}
		}
	}
}
// extractNumberFromWhen pulls the integer threshold out of a "when" condition
// such as "count() < 3". prefix identifies the expression the condition must
// reference (e.g. "count()" or "sum(cpuCapacity)"). Both "<" and "<="
// comparisons are recognized. Returns 0 when the condition does not reference
// prefix or no integer threshold can be parsed.
func (c *v1beta3Converter) extractNumberFromWhen(when, prefix string) int {
	when = strings.TrimSpace(when)
	if !strings.Contains(when, prefix) {
		return 0
	}
	parts := strings.Split(when, "<")
	if len(parts) != 2 {
		return 0
	}
	// Splitting "<=" on "<" leaves a leading "=" on the right-hand side;
	// strip it so "count() <= 3" parses the same as "count() < 3".
	numStr := strings.TrimSpace(strings.TrimPrefix(strings.TrimSpace(parts[1]), "="))
	if num, err := strconv.Atoi(numStr); err == nil {
		return num
	}
	return 0
}
// extractMemoryFromWhen pulls the GiB threshold out of a memory condition
// such as "min(memoryCapacity) < 8Gi". Both "<" and "<=" comparisons are
// recognized. Returns 0 when the condition does not reference memoryCapacity
// or the size cannot be parsed as an integer number of Gi.
func (c *v1beta3Converter) extractMemoryFromWhen(when string) int {
	when = strings.TrimSpace(when)
	if !strings.Contains(when, "memoryCapacity") {
		return 0
	}
	parts := strings.Split(when, "<")
	if len(parts) != 2 {
		return 0
	}
	// Splitting "<=" on "<" leaves a leading "=" on the right-hand side;
	// strip it so "... <= 8Gi" parses the same as "... < 8Gi".
	sizeStr := strings.TrimPrefix(strings.TrimSpace(parts[1]), "=")
	sizeStr = strings.TrimSuffix(strings.TrimSpace(sizeStr), "Gi")
	if num, err := strconv.Atoi(sizeStr); err == nil {
		return num
	}
	return 0
}
// extractStorageFromWhen pulls the GiB threshold out of an ephemeral-storage
// condition such as "min(ephemeralStorageCapacity) < 40Gi". Both "<" and "<="
// comparisons are recognized. Returns 0 when the condition does not reference
// ephemeralStorageCapacity or the size cannot be parsed as an integer number
// of Gi.
func (c *v1beta3Converter) extractStorageFromWhen(when string) int {
	when = strings.TrimSpace(when)
	if !strings.Contains(when, "ephemeralStorageCapacity") {
		return 0
	}
	parts := strings.Split(when, "<")
	if len(parts) != 2 {
		return 0
	}
	// Splitting "<=" on "<" leaves a leading "=" on the right-hand side;
	// strip it so "... <= 40Gi" parses the same as "... < 40Gi".
	sizeStr := strings.TrimPrefix(strings.TrimSpace(parts[1]), "=")
	sizeStr = strings.TrimSuffix(strings.TrimSpace(sizeStr), "Gi")
	if num, err := strconv.Atoi(sizeStr); err == nil {
		return num
	}
	return 0
}
// templatizeMemoryOutcomes rewrites a nodeResources memory analyzer's
// outcomes to reference {{ .Values.node.minMemoryGi }} (fail) and
// {{ .Values.node.recommendedMemoryGi }} (warn/pass) instead of hardcoded sizes.
func (c *v1beta3Converter) templatizeMemoryOutcomes(analyzer map[interface{}]interface{}) {
	c.templatizeNodeResourceOutcomes(analyzer, "memoryCapacity", "node.minMemoryGi", "node.recommendedMemoryGi")
}
// templatizeEphemeralOutcomes rewrites a nodeResources ephemeral-storage
// analyzer's outcomes to reference {{ .Values.node.minEphemeralGi }} (fail) and
// {{ .Values.node.recommendedEphemeralGi }} (warn/pass) instead of hardcoded sizes.
func (c *v1beta3Converter) templatizeEphemeralOutcomes(analyzer map[interface{}]interface{}) {
	c.templatizeNodeResourceOutcomes(analyzer, "ephemeralStorageCapacity", "node.minEphemeralGi", "node.recommendedEphemeralGi")
}
// templatizeNodeResourceOutcomes rewrites the outcomes of a nodeResources
// analyzer that checks the given capacity field (e.g. "memoryCapacity") so the
// "when" thresholds and messages reference values keys instead of hardcoded
// numbers. minKey templates the fail outcome; recKey templates warn and pass.
// The maps are mutated in place.
func (c *v1beta3Converter) templatizeNodeResourceOutcomes(analyzer map[interface{}]interface{}, capacity, minKey, recKey string) {
	nr, ok := analyzer["nodeResources"].(map[interface{}]interface{})
	if !ok {
		return
	}
	outcomes, ok := nr["outcomes"].([]interface{})
	if !ok {
		return
	}
	// Human-readable resource name is the last segment of the values key,
	// e.g. "node.minMemoryGi" -> "minMemoryGi".
	lastSegment := func(key string) string {
		segs := strings.Split(key, ".")
		return segs[len(segs)-1]
	}
	for _, raw := range outcomes {
		outcome, ok := raw.(map[interface{}]interface{})
		if !ok {
			continue
		}
		if fail, ok := outcome["fail"].(map[interface{}]interface{}); ok {
			if when, ok := fail["when"].(string); ok && strings.Contains(when, capacity) {
				fail["when"] = fmt.Sprintf("min(%s) < {{ .Values.%s }}Gi", capacity, minKey)
			}
			if _, ok := fail["message"].(string); ok {
				fail["message"] = fmt.Sprintf("All nodes must have at least {{ .Values.%s }} GiB of %s.", minKey, lastSegment(minKey))
			}
		}
		if warn, ok := outcome["warn"].(map[interface{}]interface{}); ok {
			if when, ok := warn["when"].(string); ok && strings.Contains(when, capacity) {
				warn["when"] = fmt.Sprintf("min(%s) < {{ .Values.%s }}Gi", capacity, recKey)
			}
			if _, ok := warn["message"].(string); ok {
				warn["message"] = fmt.Sprintf("All nodes are recommended to have at least {{ .Values.%s }} GiB of %s.", recKey, lastSegment(recKey))
			}
		}
		if pass, ok := outcome["pass"].(map[interface{}]interface{}); ok {
			if _, ok := pass["message"].(string); ok {
				pass["message"] = fmt.Sprintf("All nodes have at least {{ .Values.%s }} GiB of %s.", recKey, lastSegment(recKey))
			}
		}
	}
}
// setNestedValue stores value into c.values under a dotted path such as
// "cluster.minNodes", creating intermediate maps as needed. If an
// intermediate segment already holds a non-map value, the write is silently
// abandoned rather than clobbering the existing value.
func (c *v1beta3Converter) setNestedValue(path string, value interface{}) {
	keys := strings.Split(path, ".")
	node := c.values
	for _, key := range keys[:len(keys)-1] {
		if _, exists := node[key]; !exists {
			node[key] = make(map[string]interface{})
		}
		child, ok := node[key].(map[string]interface{})
		if !ok {
			// Path segment occupied by a scalar; cannot descend further.
			return
		}
		node = child
	}
	node[keys[len(keys)-1]] = value
}

View File

@@ -21,6 +21,17 @@ func ConvertToV1Beta2(doc []byte) ([]byte, error) {
return doc, nil
}
if v == "troubleshoot.sh/v1beta3" {
// For v1beta3, just change the apiVersion to v1beta2
// The actual template rendering will be handled elsewhere
parsed["apiVersion"] = "troubleshoot.sh/v1beta2"
newDoc, err := yaml.Marshal(parsed)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal new spec")
}
return newDoc, nil
}
if v != "troubleshoot.replicated.com/v1beta1" {
return nil, errors.Errorf("cannot convert %s", v)
}

View File

@@ -200,7 +200,7 @@ func (l *specLoader) loadFromStrings(rawSpecs ...string) (*TroubleshootKinds, er
default:
return nil, types.NewExitCodeError(constants.EXIT_CODE_SPEC_ISSUES, errors.Errorf("%T type is not a Secret or ConfigMap", v))
}
} else if parsed.APIVersion == constants.Troubleshootv1beta2Kind || parsed.APIVersion == constants.Troubleshootv1beta1Kind {
} else if parsed.APIVersion == constants.Troubleshootv1beta3Kind || parsed.APIVersion == constants.Troubleshootv1beta2Kind || parsed.APIVersion == constants.Troubleshootv1beta1Kind {
// If it's not a configmap or secret, just append it to the splitdocs
splitdocs = append(splitdocs, rawDoc)
} else {

View File

@@ -2,14 +2,19 @@ package preflight
import (
"context"
"os"
"strings"
"github.com/pkg/errors"
"github.com/replicatedhq/troubleshoot/internal/specs"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/loader"
"github.com/spf13/viper"
"helm.sh/helm/v3/pkg/strvals"
"k8s.io/client-go/kubernetes"
yaml "sigs.k8s.io/yaml"
)
func readSpecs(args []string) (*loader.TroubleshootKinds, error) {
@@ -23,8 +28,20 @@ func readSpecs(args []string) (*loader.TroubleshootKinds, error) {
return nil, errors.Wrap(err, "failed to convert create k8s client")
}
// Pre-process v1beta3 specs with templates if values are provided
processedArgs, tempFiles, err := preprocessV1Beta3Specs(args)
if err != nil {
return nil, errors.Wrap(err, "failed to preprocess v1beta3 specs")
}
// Ensure any temp files created during preprocessing are cleaned up
defer func() {
for _, f := range tempFiles {
_ = os.Remove(f)
}
}()
ctx := context.Background()
kinds, err := specs.LoadFromCLIArgs(ctx, client, args, viper.GetViper())
kinds, err := specs.LoadFromCLIArgs(ctx, client, processedArgs, viper.GetViper())
if err != nil {
return nil, err
}
@@ -65,3 +82,127 @@ func readSpecs(args []string) (*loader.TroubleshootKinds, error) {
return ret, nil
}
// preprocessV1Beta3Specs renders any v1beta3 template specs among args using
// values loaded from --values files and --set flags. It returns the (possibly
// rewritten) argument list and the list of temp files the caller must remove.
// If no values are provided, args are returned unchanged and no temp files
// are created.
func preprocessV1Beta3Specs(args []string) ([]string, []string, error) {
	valuesFiles := viper.GetStringSlice("values")
	setValues := viper.GetStringSlice("set")
	// If no values provided, return args unchanged
	if len(valuesFiles) == 0 && len(setValues) == 0 {
		return args, nil, nil
	}
	// Load values from files, later files overriding earlier ones
	values := make(map[string]interface{})
	for _, valuesFile := range valuesFiles {
		if valuesFile == "" {
			continue
		}
		data, err := os.ReadFile(valuesFile)
		if err != nil {
			return nil, nil, errors.Wrapf(err, "failed to read values file %s", valuesFile)
		}
		var fileValues map[string]interface{}
		if err := yaml.Unmarshal(data, &fileValues); err != nil {
			return nil, nil, errors.Wrapf(err, "failed to parse values file %s", valuesFile)
		}
		values = mergeMaps(values, fileValues)
	}
	// Apply --set values on top of file values
	for _, setValue := range setValues {
		if err := strvals.ParseInto(setValue, values); err != nil {
			return nil, nil, errors.Wrapf(err, "failed to parse --set value: %s", setValue)
		}
	}
	processedArgs := make([]string, 0, len(args))
	tempFiles := make([]string, 0)
	// Remove temp files created so far; used on error paths so they are not
	// leaked (the caller only cleans up files we successfully return).
	cleanup := func() {
		for _, f := range tempFiles {
			_ = os.Remove(f)
		}
	}
	for _, arg := range args {
		// Skip non-file arguments (like URLs, stdin, in-cluster references)
		if arg == "-" || strings.HasPrefix(arg, "http://") || strings.HasPrefix(arg, "https://") ||
			strings.HasPrefix(arg, "secret/") || strings.HasPrefix(arg, "configmap/") {
			processedArgs = append(processedArgs, arg)
			continue
		}
		// Pass through args that are not readable files
		if _, err := os.Stat(arg); err != nil {
			processedArgs = append(processedArgs, arg)
			continue
		}
		content, err := os.ReadFile(arg)
		if err != nil {
			cleanup()
			return nil, nil, errors.Wrapf(err, "failed to read file %s", arg)
		}
		if !isV1Beta3Template(content) {
			processedArgs = append(processedArgs, arg)
			continue
		}
		tmpName, err := renderV1Beta3ToTempFile(string(content), values)
		if err != nil {
			cleanup()
			return nil, nil, errors.Wrapf(err, "failed to render v1beta3 template %s", arg)
		}
		processedArgs = append(processedArgs, tmpName)
		tempFiles = append(tempFiles, tmpName)
	}
	return processedArgs, tempFiles, nil
}

// isV1Beta3Template reports whether raw spec content is a v1beta3 spec that
// contains Helm template markers and therefore needs rendering.
func isV1Beta3Template(content []byte) bool {
	contentStr := string(content)
	if !strings.Contains(contentStr, "{{") || !strings.Contains(contentStr, "}}") {
		return false
	}
	var parsed map[string]interface{}
	if err := yaml.Unmarshal(content, &parsed); err != nil {
		// Not valid YAML (likely because of templating); fall back to a
		// textual apiVersion check.
		return strings.Contains(contentStr, "apiVersion: troubleshoot.sh/v1beta3")
	}
	apiVersion, ok := parsed["apiVersion"]
	return ok && apiVersion == constants.Troubleshootv1beta3Kind
}

// renderV1Beta3ToTempFile renders a templated v1beta3 spec with the provided
// values and writes the result to a temporary file, returning its path. The
// caller is responsible for removing the file.
func renderV1Beta3ToTempFile(content string, values map[string]interface{}) (string, error) {
	rendered, err := RenderWithHelmTemplate(content, values)
	if err != nil {
		return "", err
	}
	tmpFile, err := os.CreateTemp("", "preflight-rendered-*.yaml")
	if err != nil {
		return "", errors.Wrap(err, "failed to create temp file")
	}
	if _, err := tmpFile.WriteString(rendered); err != nil {
		tmpFile.Close()
		os.Remove(tmpFile.Name())
		return "", errors.Wrap(err, "failed to write rendered template")
	}
	if err := tmpFile.Close(); err != nil {
		os.Remove(tmpFile.Name())
		return "", errors.Wrap(err, "failed to close temp file")
	}
	return tmpFile.Name(), nil
}

View File

@@ -9,8 +9,8 @@ import (
"github.com/Masterminds/sprig/v3"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
"helm.sh/helm/v3/pkg/strvals"
yaml "sigs.k8s.io/yaml"
)
// RunTemplate processes a templated preflight spec file with provided values

View File

@@ -0,0 +1,367 @@
package preflight
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/loader"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// repoPath returns a path relative to the repository root from within pkg/preflight tests
// repoPath resolves rel against the repository root, assuming tests run
// from within pkg/preflight (two directories below the root).
func repoPath(rel string) string {
	segments := []string{"..", "..", rel}
	return filepath.Join(segments...)
}
// TestDetectAPIVersion_V1Beta3 verifies that detectAPIVersion extracts the
// apiVersion string from the repo's sample v1beta3 spec file.
func TestDetectAPIVersion_V1Beta3(t *testing.T) {
	t.Parallel()
	content, err := os.ReadFile(repoPath("v1beta3.yaml"))
	require.NoError(t, err)
	api := detectAPIVersion(string(content))
	assert.Equal(t, "troubleshoot.sh/v1beta3", api)
}
func TestRender_V1Beta3_MinimalValues_YieldsNoAnalyzers(t *testing.T) {
t.Parallel()
tpl, err := os.ReadFile(repoPath("v1beta3.yaml"))
require.NoError(t, err)
valuesFile := repoPath("values-v1beta3-minimal.yaml")
vals, err := loadValuesFile(valuesFile)
require.NoError(t, err)
rendered, err := RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err := loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf := kinds.PreflightsV1Beta2[0]
assert.Len(t, pf.Spec.Analyzers, 0)
}
func TestRender_V1Beta3_FullValues_ContainsExpectedAnalyzers(t *testing.T) {
t.Parallel()
tpl, err := os.ReadFile(repoPath("v1beta3.yaml"))
require.NoError(t, err)
valuesFile := repoPath("values-v1beta3-full.yaml")
vals, err := loadValuesFile(valuesFile)
require.NoError(t, err)
rendered, err := RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err := loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf := kinds.PreflightsV1Beta2[0]
var hasStorageClass, hasCRD, hasRuntime, hasDistribution bool
nodeResourcesCount := 0
for _, a := range pf.Spec.Analyzers {
if a.StorageClass != nil {
hasStorageClass = true
assert.Equal(t, "Default StorageClass", a.StorageClass.CheckName)
assert.Equal(t, "default", a.StorageClass.StorageClassName)
}
if a.CustomResourceDefinition != nil {
hasCRD = true
assert.Equal(t, "Contour IngressRoute CRD", a.CustomResourceDefinition.CheckName)
assert.Equal(t, "ingressroutes.contour.heptio.com", a.CustomResourceDefinition.CustomResourceDefinitionName)
}
if a.ContainerRuntime != nil {
hasRuntime = true
}
if a.Distribution != nil {
hasDistribution = true
}
if a.NodeResources != nil {
nodeResourcesCount++
}
}
assert.True(t, hasStorageClass, "expected StorageClass analyzer present")
assert.True(t, hasCRD, "expected CustomResourceDefinition analyzer present")
assert.True(t, hasRuntime, "expected ContainerRuntime analyzer present")
assert.True(t, hasDistribution, "expected Distribution analyzer present")
assert.Equal(t, 4, nodeResourcesCount, "expected 4 NodeResources analyzers (count, cpu, memory, ephemeral)")
}
func TestRender_V1Beta3_MergeMultipleValuesFiles_And_SetPrecedence(t *testing.T) {
t.Parallel()
tpl, err := os.ReadFile(repoPath("v1beta3.yaml"))
require.NoError(t, err)
// Merge minimal + 1 + 3 => kubernetes.enabled should end up false due to last wins in file 3
vals := map[string]interface{}{}
for _, f := range []string{
repoPath("values-v1beta3-minimal.yaml"),
repoPath("values-v1beta3-1.yaml"),
repoPath("values-v1beta3-3.yaml"),
} {
m, err := loadValuesFile(f)
require.NoError(t, err)
vals = mergeMaps(vals, m)
}
// First render without --set; expect NO kubernetes analyzer
rendered, err := RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err := loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf := kinds.PreflightsV1Beta2[0]
assert.False(t, containsAnalyzer(pf.Spec.Analyzers, "clusterVersion"))
// Apply --set kubernetes.enabled=true and re-render; expect kubernetes analyzer present
require.NoError(t, applySetValue(vals, "kubernetes.enabled=true"))
rendered2, err := RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds2, err := loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered2, Strict: true})
require.NoError(t, err)
require.Len(t, kinds2.PreflightsV1Beta2, 1)
pf2 := kinds2.PreflightsV1Beta2[0]
assert.True(t, containsAnalyzer(pf2.Spec.Analyzers, "clusterVersion"))
}
// containsAnalyzer reports whether any analyzer in the list is of the named
// kind (e.g. "clusterVersion", "storageClass"). Unknown kinds never match.
func containsAnalyzer(analyzers []*troubleshootv1beta2.Analyze, kind string) bool {
	matches := func(a *troubleshootv1beta2.Analyze) bool {
		switch kind {
		case "clusterVersion":
			return a.ClusterVersion != nil
		case "storageClass":
			return a.StorageClass != nil
		case "customResourceDefinition":
			return a.CustomResourceDefinition != nil
		case "containerRuntime":
			return a.ContainerRuntime != nil
		case "distribution":
			return a.Distribution != nil
		case "nodeResources":
			return a.NodeResources != nil
		default:
			return false
		}
	}
	for _, a := range analyzers {
		if matches(a) {
			return true
		}
	}
	return false
}
func TestRender_V1Beta3_CLI_ValuesAndSetFlags(t *testing.T) {
t.Parallel()
tpl, err := os.ReadFile(repoPath("v1beta3.yaml"))
require.NoError(t, err)
// Start with minimal values (no analyzers enabled)
vals, err := loadValuesFile(repoPath("values-v1beta3-minimal.yaml"))
require.NoError(t, err)
// Test: render with minimal values - should have no analyzers
rendered, err := RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err := loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf := kinds.PreflightsV1Beta2[0]
assert.Len(t, pf.Spec.Analyzers, 0, "minimal values should produce no analyzers")
// Test: simulate CLI --set flag to enable kubernetes checks
err = applySetValue(vals, "kubernetes.enabled=true")
require.NoError(t, err)
rendered, err = RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err = loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf = kinds.PreflightsV1Beta2[0]
assert.True(t, containsAnalyzer(pf.Spec.Analyzers, "clusterVersion"), "kubernetes analyzer should be present after --set kubernetes.enabled=true")
// Test: simulate CLI --set flag to override specific values
err = applySetValue(vals, "kubernetes.minVersion=1.25.0")
require.NoError(t, err)
err = applySetValue(vals, "kubernetes.recommendedVersion=1.27.0")
require.NoError(t, err)
rendered, err = RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err = loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf = kinds.PreflightsV1Beta2[0]
// Verify the overridden values appear in the rendered spec
var clusterVersionAnalyzer *troubleshootv1beta2.ClusterVersion
for _, a := range pf.Spec.Analyzers {
if a.ClusterVersion != nil {
clusterVersionAnalyzer = a.ClusterVersion
break
}
}
require.NotNil(t, clusterVersionAnalyzer, "cluster version analyzer should be present")
// Check that our --set values are used in the rendered outcomes
foundMinVersion := false
foundRecommendedVersion := false
for _, outcome := range clusterVersionAnalyzer.Outcomes {
if outcome.Fail != nil && strings.Contains(outcome.Fail.When, "1.25.0") {
foundMinVersion = true
}
if outcome.Warn != nil && strings.Contains(outcome.Warn.When, "1.27.0") {
foundRecommendedVersion = true
}
}
assert.True(t, foundMinVersion, "should find --set minVersion in rendered spec")
assert.True(t, foundRecommendedVersion, "should find --set recommendedVersion in rendered spec")
// Test: multiple --set flags to enable multiple analyzer types
err = applySetValue(vals, "storage.enabled=true")
require.NoError(t, err)
err = applySetValue(vals, "runtime.enabled=true")
require.NoError(t, err)
rendered, err = RenderWithHelmTemplate(string(tpl), vals)
require.NoError(t, err)
kinds, err = loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds.PreflightsV1Beta2, 1)
pf = kinds.PreflightsV1Beta2[0]
assert.True(t, containsAnalyzer(pf.Spec.Analyzers, "clusterVersion"), "kubernetes analyzer should remain enabled")
assert.True(t, containsAnalyzer(pf.Spec.Analyzers, "storageClass"), "storage analyzer should be enabled")
assert.True(t, containsAnalyzer(pf.Spec.Analyzers, "containerRuntime"), "runtime analyzer should be enabled")
}
func TestRender_V1Beta3_InvalidTemplate_ErrorHandling(t *testing.T) {
t.Parallel()
// Test: malformed YAML syntax (actually, this should pass template rendering but fail YAML parsing later)
invalidYaml := `apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: invalid-yaml
spec:
analyzers:
- this is not valid yaml
missing proper structure:
- and wrong indentation
`
vals := map[string]interface{}{}
rendered, err := RenderWithHelmTemplate(invalidYaml, vals)
require.NoError(t, err, "template rendering should succeed even with malformed YAML")
// But loading the spec should fail due to invalid YAML structure
_, err = loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
assert.Error(t, err, "loading malformed YAML should produce an error")
// Test: invalid Helm template syntax
invalidTemplate := `apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: invalid-template
spec:
analyzers:
{{- if .Values.invalid.syntax with unclosed brackets
- clusterVersion:
outcomes:
- pass:
message: "This should fail"
`
_, err = RenderWithHelmTemplate(invalidTemplate, vals)
assert.Error(t, err, "invalid template syntax should produce an error")
// Test: template referencing undefined values with proper conditional check
templateWithUndefined := `apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: undefined-values
spec:
analyzers:
{{- if and .Values.nonexistent (ne .Values.nonexistent.field nil) }}
- clusterVersion:
checkName: "Version: {{ .Values.nonexistent.version }}"
outcomes:
- pass:
message: "Should not appear"
{{- end }}
`
rendered, err = RenderWithHelmTemplate(templateWithUndefined, vals)
require.NoError(t, err, "properly guarded undefined values should not cause template error")
kinds2, err := loader.LoadSpecs(context.Background(), loader.LoadOptions{RawSpec: rendered, Strict: true})
require.NoError(t, err)
require.Len(t, kinds2.PreflightsV1Beta2, 1)
pf2 := kinds2.PreflightsV1Beta2[0]
assert.Len(t, pf2.Spec.Analyzers, 0, "undefined values should result in no analyzers")
// Test: template that directly accesses undefined field (should error)
templateWithDirectUndefined := `apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: direct-undefined
spec:
analyzers:
- clusterVersion:
checkName: "{{ .Values.nonexistent.field }}"
outcomes:
- pass:
message: "Should fail"
`
_, err = RenderWithHelmTemplate(templateWithDirectUndefined, vals)
assert.Error(t, err, "directly accessing undefined nested values should cause template error")
// Test: template with missing required value (should error during template rendering)
templateMissingRequired := `apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: missing-required
spec:
analyzers:
- storageClass:
checkName: "Storage Test"
storageClassName: {{ .Values.storage.className }}
outcomes:
- pass:
message: "Storage is good"
`
valsWithoutStorage := map[string]interface{}{
"other": map[string]interface{}{
"field": "value",
},
}
_, err = RenderWithHelmTemplate(templateMissingRequired, valsWithoutStorage)
assert.Error(t, err, "template rendering should fail when accessing undefined nested values")
// Test: circular reference in values (this would be a user config error)
circularVals := map[string]interface{}{
"test": map[string]interface{}{
"field": "{{ .Values.test.field }}", // This would create infinite loop if processed
},
}
templateWithCircular := `apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: circular-test
spec:
analyzers:
- data:
name: test.json
data: |
{"value": "{{ .Values.test.field }}"}
`
// Helm template engine should handle this gracefully (it doesn't recursively process string values)
rendered, err = RenderWithHelmTemplate(templateWithCircular, circularVals)
require.NoError(t, err, "circular reference in values should not crash template engine")
assert.Contains(t, rendered, "{{ .Values.test.field }}", "circular reference should render as literal string")
}

244
v1beta3.yaml Normal file
View File

@@ -0,0 +1,244 @@
apiVersion: troubleshoot.sh/v1beta3
kind: Preflight
metadata:
name: templated-from-v1beta2
spec:
analyzers:
{{- if .Values.kubernetes.enabled }}
- docString: |
Title: Kubernetes Control Plane Requirements
Requirement:
- Version:
- Minimum: {{ .Values.kubernetes.minVersion }}
- Recommended: {{ .Values.kubernetes.recommendedVersion }}
- Docs: https://kubernetes.io
These version targets ensure that required APIs and default behaviors are
available and patched. Moving below the minimum commonly removes GA APIs
(e.g., apps/v1 workloads, storage and ingress v1 APIs), changes admission
defaults, and lacks critical CVE fixes. Running at or above the recommended
version matches what is exercised most extensively in CI and receives the
best operational guidance for upgrades and incident response.
clusterVersion:
checkName: Kubernetes version
outcomes:
- fail:
when: '< {{ .Values.kubernetes.minVersion }}'
message: This application requires at least Kubernetes {{ .Values.kubernetes.minVersion }}, and recommends {{ .Values.kubernetes.recommendedVersion }}.
uri: https://www.kubernetes.io
- warn:
when: '< {{ .Values.kubernetes.recommendedVersion }}'
message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to {{ .Values.kubernetes.recommendedVersion }} or later.
uri: https://kubernetes.io
- pass:
when: '>= {{ .Values.kubernetes.recommendedVersion }}'
message: Your cluster meets the recommended and required versions of Kubernetes.
{{- end }}
{{- if .Values.ingress.enabled }}
- docString: |
Title: Required CRDs and Ingress Capabilities
Requirement:
- Ingress Controller: Contour
- CRD must be present:
- Group: heptio.com
- Kind: IngressRoute
- Version: v1beta1 or later served version
The ingress layer terminates TLS and routes external traffic to Services.
Contour relies on the IngressRoute CRD to express host/path routing, TLS
configuration, and policy. If the CRD is not installed and served by the
API server, Contour cannot reconcile desired state, leaving routes
unconfigured and traffic unreachable.
{{- if eq .Values.ingress.type "Contour" }}
customResourceDefinition:
checkName: Contour IngressRoute CRD
customResourceDefinitionName: ingressroutes.contour.heptio.com
outcomes:
- fail:
message: Contour IngressRoute CRD not found; required for ingress routing
- pass:
message: Contour IngressRoute CRD present
{{- end }}
{{- end }}
{{- if .Values.runtime.enabled }}
- docString: |
Title: Container Runtime Requirements
Requirement:
- Runtime: containerd (CRI)
- Kubelet cgroup driver: systemd
- CRI socket path: /run/containerd/containerd.sock
containerd (via the CRI) is the supported runtime for predictable container
lifecycle management. On modern distros (cgroup v2), kubelet and the OS must
both use the systemd cgroup driver to avoid resource accounting mismatches
that lead to unexpected OOMKills and throttling. The CRI socket path must
match kubelet configuration so the node can start and manage pods.
containerRuntime:
outcomes:
- pass:
when: '== containerd'
message: containerd runtime detected
- fail:
message: Unsupported container runtime; containerd required
{{- end }}
{{- if .Values.storage.enabled }}
- docString: |
Title: Default StorageClass Requirements
Requirement:
- A StorageClass named "{{ .Values.storage.className }}" must exist (cluster default preferred)
- AccessMode: ReadWriteOnce (RWO) required (RWX optional)
- VolumeBindingMode: WaitForFirstConsumer preferred
- allowVolumeExpansion: true recommended
A default StorageClass enables dynamic PVC provisioning without manual
intervention. RWO provides baseline persistence semantics for stateful pods.
WaitForFirstConsumer defers binding until a pod is scheduled, improving
topology-aware placement (zonal/az) and reducing unschedulable PVCs.
AllowVolumeExpansion permits online growth during capacity pressure
without disruptive migrations.
storageClass:
checkName: Default StorageClass
storageClassName: '{{ .Values.storage.className }}'
outcomes:
- fail:
message: Default StorageClass not found
- pass:
message: Default StorageClass present
{{- end }}
{{- if .Values.distribution.enabled }}
- docString: |
Title: Kubernetes Distribution Support
Requirement:
- Unsupported: docker-desktop, microk8s, minikube
- Supported: eks, gke, aks, kurl, digitalocean, rke2, k3s, oke, kind
Development or single-node environments are optimized for local testing and
omit HA control-plane patterns, cloud integration, and production defaults.
The supported distributions are validated for API compatibility, RBAC
expectations, admission behavior, and default storage/networking this
application depends on.
distribution:
outcomes:
- fail:
when: '== docker-desktop'
message: The application does not support Docker Desktop Clusters
- fail:
when: '== microk8s'
message: The application does not support Microk8s Clusters
- fail:
when: '== minikube'
message: The application does not support Minikube Clusters
- pass:
when: '== eks'
message: EKS is a supported distribution
- pass:
when: '== gke'
message: GKE is a supported distribution
- pass:
when: '== aks'
message: AKS is a supported distribution
- pass:
when: '== kurl'
message: KURL is a supported distribution
- pass:
when: '== digitalocean'
message: DigitalOcean is a supported distribution
- pass:
when: '== rke2'
message: RKE2 is a supported distribution
- pass:
when: '== k3s'
message: K3S is a supported distribution
- pass:
when: '== oke'
message: OKE is a supported distribution
- pass:
when: '== kind'
message: Kind is a supported distribution
- warn:
message: Unable to determine the distribution of Kubernetes
{{- end }}
{{- if .Values.nodeChecks.count.enabled }}
- docString: |
Title: Node count requirement
Requirement:
- Node count: Minimum {{ .Values.cluster.minNodes }} nodes, Recommended {{ .Values.cluster.recommendedNodes }} nodes
Multiple worker nodes provide scheduling capacity, tolerance to disruptions,
and safe rolling updates. Operating below the recommendation increases risk
of unschedulable pods during maintenance or failures and reduces headroom
for horizontal scaling.
nodeResources:
checkName: Node count
outcomes:
- fail:
when: 'count() < {{ .Values.cluster.minNodes }}'
message: This application requires at least {{ .Values.cluster.minNodes }} nodes.
uri: https://kurl.sh/docs/install-with-kurl/adding-nodes
- warn:
when: 'count() < {{ .Values.cluster.recommendedNodes }}'
message: This application recommends at least {{ .Values.cluster.recommendedNodes }} nodes.
uri: https://kurl.sh/docs/install-with-kurl/adding-nodes
- pass:
message: This cluster has enough nodes.
{{- end }}
{{- if .Values.nodeChecks.cpu.enabled }}
- docString: |
Title: Cluster CPU requirement
Requirement:
- Total CPU: Minimum {{ .Values.cluster.minCPU }} vCPU
Aggregate CPU must cover system daemons, controllers, and application pods.
Insufficient CPU causes prolonged scheduling latency, readiness probe
failures, and throughput collapse under load.
nodeResources:
checkName: Cluster CPU total
outcomes:
- fail:
when: 'sum(cpuCapacity) < {{ .Values.cluster.minCPU }}'
message: The cluster must contain at least {{ .Values.cluster.minCPU }} cores
uri: https://kurl.sh/docs/install-with-kurl/system-requirements
- pass:
message: There are at least {{ .Values.cluster.minCPU }} cores in the cluster
{{- end }}
{{- if .Values.nodeChecks.memory.enabled }}
- docString: |
Title: Per-node memory requirement
Requirement:
- Per-node memory: Minimum {{ .Values.node.minMemoryGi }} GiB; Recommended {{ .Values.node.recommendedMemoryGi }} GiB
Nodes must reserve memory for kubelet/system components and per-pod overhead.
Below the minimum, pods will frequently be OOMKilled or evicted. The
recommended capacity provides headroom for spikes, compactions, and
upgrades without destabilizing workloads.
nodeResources:
checkName: Per-node memory requirement
outcomes:
- fail:
when: 'min(memoryCapacity) < {{ .Values.node.minMemoryGi }}Gi'
message: All nodes must have at least {{ .Values.node.minMemoryGi }} GiB of memory.
uri: https://kurl.sh/docs/install-with-kurl/system-requirements
- warn:
when: 'min(memoryCapacity) < {{ .Values.node.recommendedMemoryGi }}Gi'
message: All nodes are recommended to have at least {{ .Values.node.recommendedMemoryGi }} GiB of memory.
uri: https://kurl.sh/docs/install-with-kurl/system-requirements
- pass:
message: All nodes have at least {{ .Values.node.recommendedMemoryGi }} GiB of memory.
{{- end }}
{{- if .Values.nodeChecks.ephemeral.enabled }}
- docString: |
Title: Per-node ephemeral storage requirement
Requirement:
- Per-node ephemeral storage: Minimum {{ .Values.node.minEphemeralGi }} GiB; Recommended {{ .Values.node.recommendedEphemeralGi }} GiB
Ephemeral storage backs image layers, writable container filesystems, logs,
and temporary data. When capacity is low, kubelet enters disk-pressure
eviction and image pulls fail, causing pod restarts and data loss for
transient files.
nodeResources:
checkName: Per-node ephemeral storage requirement
outcomes:
- fail:
when: 'min(ephemeralStorageCapacity) < {{ .Values.node.minEphemeralGi }}Gi'
message: All nodes must have at least {{ .Values.node.minEphemeralGi }} GiB of ephemeral storage.
uri: https://kurl.sh/docs/install-with-kurl/system-requirements
- warn:
when: 'min(ephemeralStorageCapacity) < {{ .Values.node.recommendedEphemeralGi }}Gi'
message: All nodes are recommended to have at least {{ .Values.node.recommendedEphemeralGi }} GiB of ephemeral storage.
uri: https://kurl.sh/docs/install-with-kurl/system-requirements
- pass:
message: All nodes have at least {{ .Values.node.recommendedEphemeralGi }} GiB of ephemeral storage.
{{- end }}

10
values-v1beta3-1.yaml Normal file
View File

@@ -0,0 +1,10 @@
# Minimal values for v1beta3-templated-from-v1beta2.yaml
kubernetes:
enabled: true
minVersion: "1.22.0"
recommendedVersion: "1.29.0"
storage:
enabled: true
className: "default"

10
values-v1beta3-2.yaml Normal file
View File

@@ -0,0 +1,10 @@
# Values for v1beta3-templated-from-v1beta2.yaml (cluster and node sizing)
cluster:
minNodes: 3
recommendedNodes: 3
minCPU: 4
node:
minMemoryGi: 8
recommendedMemoryGi: 16
minEphemeralGi: 40
recommendedEphemeralGi: 40

26
values-v1beta3-3.yaml Normal file
View File

@@ -0,0 +1,26 @@
# Values for v1beta3-templated-from-v1beta2.yaml (feature toggles; kubernetes checks disabled)
ingress:
enabled: true
type: "Contour"
runtime:
enabled: true
distribution:
enabled: true
nodeChecks:
enabled: true
count:
enabled: true
cpu:
enabled: true
memory:
enabled: true
ephemeral:
enabled: true
kubernetes:
enabled: false
minVersion: "1.22.0"
recommendedVersion: "1.29.0"

66
values-v1beta3-full.yaml Normal file
View File

@@ -0,0 +1,66 @@
# Values for v1beta3-templated-from-v1beta2.yaml
kubernetes:
enabled: false
minVersion: "1.22.0"
recommendedVersion: "1.29.0"
storage:
enabled: true
className: "default"
cluster:
minNodes: 3
recommendedNodes: 5
minCPU: 4
node:
minMemoryGi: 8
recommendedMemoryGi: 32
minEphemeralGi: 40
recommendedEphemeralGi: 100
ingress:
enabled: true
type: "Contour"
contour:
crdName: "ingressroutes.contour.heptio.com"
crdGroup: "heptio.com"
crdKind: "IngressRoute"
crdVersion: "v1beta1 or later served version"
runtime:
enabled: true
name: "containerd"
cgroupDriver: "systemd"
criSocket: "/run/containerd/containerd.sock"
distribution:
enabled: true
unsupported:
- docker-desktop
- microk8s
- minikube
supported:
- eks
- gke
- aks
- kurl
- digitalocean
- rke2
- k3s
- oke
- kind
nodeChecks:
enabled: true
count:
enabled: true
cpu:
enabled: true
memory:
enabled: true
ephemeral:
enabled: true

View File

@@ -0,0 +1,44 @@
# Minimal values for v1beta3-templated-from-v1beta2.yaml
kubernetes:
enabled: false
minVersion: "1.22.0"
recommendedVersion: "1.29.0"
storage:
enabled: false
className: "default"
cluster:
minNodes: 3
recommendedNodes: 3
minCPU: 4
node:
minMemoryGi: 8
recommendedMemoryGi: 16
minEphemeralGi: 40
recommendedEphemeralGi: 40
ingress:
enabled: false
type: "Contour"
runtime:
enabled: false
distribution:
enabled: false
nodeChecks:
enabled: false
count:
enabled: false
cpu:
enabled: false
memory:
enabled: false
ephemeral:
enabled: false