Update Kustomize content

Jérôme Petazzoni
2025-10-28 16:20:37 +01:00
parent f25abf663b
commit 078e799666
23 changed files with 1327 additions and 243 deletions


@@ -0,0 +1,12 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
    $patch: delete
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: haproxy


@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc.) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup


@@ -0,0 +1,20 @@
global
    #log stdout format raw local0
    #daemon
    maxconn 32
defaults
    #log global
    timeout client 1h
    timeout connect 1h
    timeout server 1h
    mode http
    option abortonclose
frontend metrics
    bind :9000
    http-request use-service prometheus-exporter
frontend ollama_frontend
    bind :8000
    default_backend ollama_backend
    maxconn 16
backend ollama_backend
    server ollama_server localhost:11434 check


@@ -0,0 +1,39 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: haproxy
  name: haproxy
spec:
  selector:
    matchLabels:
      app: haproxy
  template:
    metadata:
      labels:
        app: haproxy
    spec:
      volumes:
      - name: haproxy
        configMap:
          name: haproxy
      containers:
      - image: haproxy:3.0
        name: haproxy
        volumeMounts:
        - name: haproxy
          mountPath: /usr/local/etc/haproxy
        readinessProbe:
          httpGet:
            port: 9000
        ports:
        - name: haproxy
          containerPort: 8000
        - name: metrics
          containerPort: 9000
        resources:
          requests:
            cpu: 0.05
          limits:
            cpu: 1


@@ -0,0 +1,75 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed appending to or
# merging lists in "replacements"; but it doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed performing
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable so we decided not to do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).
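# For instance (sketch only, not enabled here), an extra replacement
# could copy the container into the initContainers list instead:
#
#   - source:
#       kind: Deployment
#       name: haproxy
#       fieldPath: spec.template.spec.containers.[name=haproxy]
#     targets:
#     - select:
#         kind: Deployment
#         name: ollama
#       fieldPaths:
#       - spec.template.spec.initContainers.[name=haproxy]
#       options:
#         create: true
#
# ...combined with a patch setting restartPolicy: Always on that
# initContainer.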
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
  files:
  - haproxy.cfg
replacements:
- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.volumes.[name=haproxy]
  targets:
  - select:
      kind: Deployment
      name: ollama
    fieldPaths:
    - spec.template.spec.volumes.[name=haproxy]
    options:
      create: true
- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.containers.[name=haproxy]
  targets:
  - select:
      kind: Deployment
      name: ollama
    fieldPaths:
    - spec.template.spec.containers.[name=haproxy]
    options:
      create: true
- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
  targets:
  - select:
      kind: Service
      name: ollama
    fieldPaths:
    - spec.ports.[name=11434].targetPort


@@ -0,0 +1,34 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: blue
  name: blue
spec:
  replicas: 2
  selector:
    matchLabels:
      app: blue
  template:
    metadata:
      labels:
        app: blue
    spec:
      containers:
      - image: jpetazzo/color
        name: color
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: blue
  name: blue
spec:
  ports:
  - port: 80
  selector:
    app: blue


@@ -0,0 +1,94 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
  literals:
  - "model=gemma3:270m"
  - "prompt=If you visit Paris, I suggest that you"
  - "queue=4"
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: ollama
    spec:
      template:
        spec:
          containers:
          - name: ollama
            resources:
              requests:
                cpu: 1
              limits:
                cpu: 2
# This will have the same effect, with one little detail:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
#    kind: Deployment
#    name: ollama
#  patch: |
#    - op: add
#      path: /spec/template/spec/containers/0/resources
#      value:
#        requests:
#          cpu: 1
#        limits:
#          cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar


@@ -0,0 +1,73 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ollama
  name: ollama
spec:
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      volumes:
      - name: ollama
        hostPath:
          path: /opt/ollama
          type: DirectoryOrCreate
      containers:
      - image: ollama/ollama
        name: ollama
        env:
        - name: OLLAMA_MAX_QUEUE
          valueFrom:
            configMapKeyRef:
              name: ollama
              key: queue
        - name: MODEL
          valueFrom:
            configMapKeyRef:
              name: ollama
              key: model
        volumeMounts:
        - name: ollama
          mountPath: /root/.ollama
        lifecycle:
          postStart:
            exec:
              command:
              - /bin/sh
              - -c
              - ollama pull $MODEL
        livenessProbe:
          httpGet:
            port: 11434
        readinessProbe:
          exec:
            command:
            - /bin/sh
            - -c
            - ollama show $MODEL
        ports:
        - name: ollama
          containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ollama
  name: ollama
spec:
  ports:
  - name: "11434"
    port: 11434
    protocol: TCP
    targetPort: 11434
  selector:
    app: ollama
  type: ClusterIP


@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis


@@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
  apiVersion: builtin
  kind: PrefixSuffixTransformer
  metadata:
    name: use-ghcr-io
  prefix: ghcr.io/
  fieldSpecs:
  - path: spec/template/spec/containers/image


@@ -0,0 +1,125 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hasher
  template:
    metadata:
      labels:
        app: hasher
    spec:
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hasher
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rng
  name: rng
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rng
  template:
    metadata:
      labels:
        app: rng
    spec:
      containers:
      - image: dockercoins/rng:v0.1
        name: rng
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webui
  name: webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webui
  template:
    metadata:
      labels:
        app: webui
    spec:
      containers:
      - image: dockercoins/webui:v0.1
        name: webui
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webui
  name: webui
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webui
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: worker
  name: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: worker
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
      - image: dockercoins/worker:v0.1
        name: worker


@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml


@@ -0,0 +1,35 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis
        name: redis
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis
  name: redis
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis
  type: ClusterIP


@@ -0,0 +1,160 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hasher
  template:
    metadata:
      labels:
        app: hasher
    spec:
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hasher
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis
        name: redis
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis
  name: redis
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rng
  name: rng
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rng
  template:
    metadata:
      labels:
        app: rng
    spec:
      containers:
      - image: dockercoins/rng:v0.1
        name: rng
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webui
  name: webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webui
  template:
    metadata:
      labels:
        app: webui
    spec:
      containers:
      - image: dockercoins/webui:v0.1
        name: webui
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webui
  name: webui
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webui
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: worker
  name: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: worker
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
      - image: dockercoins/worker:v0.1
        name: worker


@@ -0,0 +1,30 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
  targets:
  - select:
      kind: Deployment
      labelSelector: "app in (hasher,rng,webui,worker)"
    # It will soon be possible to use regexes in replacement selectors,
    # meaning that the "labelSelector:" above can be replaced with the
    # following "name:" selector which is a tiny bit simpler:
    #name: hasher|rng|webui|worker
    # Regex support in replacement selectors was added by this PR:
    # https://github.com/kubernetes-sigs/kustomize/pull/5863
    # This PR was merged in August 2025, but as of October 2025, the
    # latest release of Kustomize is 5.7.1, which was released in July.
    # Hopefully the feature will be available in the next release :)
    # Another possibility would be to select all Deployments, and then
    # reject the one(s) for which we don't want to update the registry;
    # for instance:
    #reject:
    #- kind: Deployment
    #  name: redis
    fieldPaths:
    - spec.template.spec.containers.*.image
    options:
      delimiter: "/"
      index: 0


@@ -18,51 +18,7 @@
---
## From `kubectl run` to YAML
- We can create resources with one-line commands
(`kubectl run`, `kubectl create deployment`, `kubectl expose`...)
- We can also create resources by loading YAML files
(with `kubectl apply -f`, `kubectl create -f`...)
- There can be multiple resources in a single YAML file
(making them convenient to deploy entire stacks)
- However, these YAML bundles often need to be customized
(e.g.: number of replicas, image version to use, features to enable...)
---
## Beyond YAML
- Very often, after putting together our first `app.yaml`, we end up with:
  - `app-prod.yaml`
  - `app-staging.yaml`
  - `app-dev.yaml`
  - instructions indicating to users "please tweak this and that in the YAML"
- That's where using something like
[CUE](https://github.com/cue-labs/cue-by-example/tree/main/003_kubernetes_tutorial),
[Kustomize](https://kustomize.io/),
or [Helm](https://helm.sh/) can help!
- Now we can do something like this:
```bash
helm install app ... --set this.parameter=that.value
```
---
## Other features of Helm
## Helm features
- With Helm, we create "charts"


@@ -28,14 +28,14 @@
(or other *kustomizations*)
- Somewhat integrated with `kubectl`
(but only "somewhat" because of version discrepancies)
- Less complex than e.g. Helm, but also less powerful
- Integrated with `kubectl`
- No central index like the Artifact Hub (but is there a need for it?)
- *Some* modifications will be difficult to make
- 100% file-based (won't use command-line flags, environment variables...)
---
## Kustomize in a nutshell
@@ -48,7 +48,7 @@
- reference other kustomizations
- add some *patches*
- add some *patches* and other *transformations*
- ...
@@ -67,13 +67,13 @@ used in the Ingress).
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patchesStrategicMerge:
- scale-deployment.yaml
- ingress-hostname.yaml
resources:
- deployment.yaml
- service.yaml
- ingress.yaml
patches:
- path: scale-deployment.yaml
- path: ingress-hostname.yaml
```
On the next slide, let's see a more complex example ...
@@ -87,31 +87,48 @@ On the next slide, let's see a more complex example ...
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonAnnotations:
  mood: 😎
commonLabels:
  add-this-to-all-my-resources: please
namePrefix: prod-
patchesStrategicMerge:
- prod-scaling.yaml
- prod-healthchecks.yaml
bases:
- api/
- frontend/
- db/
- github.com/example/app?ref=tag-or-branch
  last-commit-message: "Bump libfoo to version 1.2.3"
labels:
- pairs:
    last-commit-hash: "39bc2d"
resources:
- github.com/example/front?ref=tag-or-branch
- github.com/example/api?ref=tag-or-branch
- db/
- workers/
- ingress.yaml
- permissions.yaml
- rbac.yaml
configMapGenerator:
- name: appconfig
  files:
  - global.conf
  - local.conf=prod.conf
patches:
- path: healthchecks.yaml
- path: resources-requests-and-limits.yaml
```
]
]
---
## Architecture
- Internally, Kustomize has three phases:
- generators (=produce a bunch of YAML, e.g. by reading manifest files)
- transformers (=transform/patch that YAML in various ways)
- validators (=have the ability to stop the process if something's wrong)
- In the previous examples:
- `resources` and `configMapGenerator` are generators
- `commonAnnotations`, `labels`, `patches` are transformers
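For instance, each directive in this minimal (hypothetical) Kustomization maps to one of those phases:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:          # generator: reads manifests from disk
- deployment.yaml
configMapGenerator: # generator: produces a ConfigMap
- name: appconfig
  literals:
  - loglevel=debug
commonAnnotations:  # transformer: annotates every resource
  team: platform
```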
---
## Glossary
- A *base* is a kustomization that is referred to by other kustomizations
@@ -122,10 +139,6 @@ configMapGenerator:
(a kustomization can refer to another, which can refer to a third)
- A *patch* describes how to alter an existing resource
(e.g. to change the image in a Deployment; or scaling parameters; etc.)
- A *variant* is the final outcome of applying bases + overlays
(See the [kustomize glossary][glossary] for more definitions!)
@@ -134,20 +147,6 @@ configMapGenerator:
---
## What Kustomize *cannot* do
- By design, there are a number of things that Kustomize won't do
- For instance:
- using command-line arguments or environment variables to generate a variant
- overlays can only *add* resources, not *remove* them
- See the full list of [eschewed features](https://kubectl.docs.kubernetes.io/faq/kustomize/eschewedfeatures/) for more details
---
## Kustomize workflows
- The Kustomize documentation proposes two different workflows
@@ -166,43 +165,7 @@ configMapGenerator:
- we may regularly update the base, or use a remote base
---
## Remote bases
- Kustomize can also use bases that are remote git repositories
- Examples:
    github.com/jpetazzo/kubercoins (remote git repository)

    github.com/jpetazzo/kubercoins?ref=kustomize (specific tag or branch)
- Note that this only works for kustomizations, not individual resources
(the specified repository or directory must contain a `kustomization.yaml` file)
---
class: extra-details
## Hashicorp go-getter
- Some versions of Kustomize support additional forms for remote resources
- Examples:
    https://releases.hello.io/k/1.0.zip (remote archive)

    https://releases.hello.io/k/1.0.zip//some-subdir (subdirectory in archive)
- This relies on [hashicorp/go-getter](https://github.com/hashicorp/go-getter#url-format)
- ... But it prevents Kustomize inclusion in `kubectl`
- Avoid them!
- See [kustomize#3578](https://github.com/kubernetes-sigs/kustomize/issues/3578) for details
- No technical difference; these are just different use-cases!
---
@@ -287,99 +250,184 @@ General workflow:
- Kubernetes 1.21 jumps to Kustomize 4.1.2
- Future versions should track Kustomize updates more closely
- Kustomize is now officially part of [sig-cli]
- `kubectl` is usually in sync with recent versions of Kustomize
(but it can still lag behind a bit, so some features might not be available!)
[sig-cli]: https://github.com/kubernetes/community/blob/master/sig-cli/README.md
---
class: extra-details
## Kustomize features
## Differences between 2.0.3 and later
- A good starting point for Kustomize features is [the Kustomization File reference](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/)
- Kustomize 2.1 / 3.0 deprecates `bases` (they should be listed in `resources`)
- Unfortunately, the Kustomize documentation is far from perfect
(this means that "modern" `kustomize edit add resource` won't work with "old" `kubectl apply -k`)
- Some features are undocumented
- Kustomize 2.1 introduces `replicas` and `envs`
- Some features are deprecated / replaced by others
- Kustomize 3.1 introduces multipatches
(but that's not well indicated in the docs; e.g. `commonLabels`)
- Kustomize 3.2 introduces inline patches in `kustomization.yaml`
- Some features are documented but not released yet
- Kustomize 3.3 to 3.10 is mostly internal refactoring
- Kustomize 4.0 drops go-getter again
- Kustomize 4.1 allows patching kind and name
(e.g. regex selectors in `replacements` as of October 2025)
---
## Adding labels
## Kustomization basics
Labels can be added to all resources like this:
- `bases` (deprecated), `resources`
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
...
commonLabels:
  app.kubernetes.io/name: dockercoins
```
*include YAML manifests*
Or with the equivalent CLI command:
- `buildMetadata`
*automatically generate interesting labels / annotations*
- `commonAnnotations`, `commonLabels`, `labels`
*add custom labels / annotations to all resources*
- `configMapGenerator`, `secretGenerator`, `generatorOptions`
*generate ConfigMaps and Secrets; appending a hash suffix to their name*
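For instance, here is a small sketch (names and values made up) combining `secretGenerator` and `generatorOptions`:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
secretGenerator:
- name: dbcreds
  literals:
  - password=letmein
generatorOptions:
  # keep stable names instead of appending the content hash
  disableNameSuffixHash: true
```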
---
## Transforming resources
- `patches`, `patchesJson6902`, `patchesStrategicMerge`
  - perform (almost) arbitrary modifications to resources
  - can be used to remove fields or even entire resources
  - patches can be in separate files, or inlined within the Kustomization file
  - patches can apply to a specific resource, or to selected resources
- `images`, `namePrefix`, `namespace`, `nameSuffix`, `replicas`
  - helpers for basic modifications; concise and easy to use, but limited
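A quick sketch (file and image names made up) combining these helpers:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
namePrefix: staging-
replicas:
- name: worker
  count: 3
images:
- name: dockercoins/worker
  newTag: v0.2
```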
---
## More transformations
- `replacements`
  - update individual fields (a bit like patches)
  - can also do substring updates (e.g. replace an image registry)
  - can copy a field from one place to another (e.g. a whole container spec)
  - can apply to individual resources or to selected resources
  - resources can also be *filtered out* (to be excluded from replacement)
---
## Teach Kustomize new tricks
- `crds` = make Kustomize aware of e.g. ConfigMap fields in CRDs
- `openapi` = give a schema to teach Kustomize about merge keys etc.
- `sortOptions` = output resources in a specific order
- `helmCharts` = evaluate a Helm chart template
(only available with `--enable-helm` flag; not standard / GA yet!)
- `vars` = define variables and reuse them elsewhere
(limited to some specific fields; ...actually, it's being deprecated already!)
- `components` = somewhat similar to an "include"
(points to a "component" and invokes all its generators and transformers)
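For instance, a sketch using `sortOptions` and `helmCharts` (the chart details are illustrative, and remember that `helmCharts` requires `--enable-helm`):
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
sortOptions:
  order: fifo  # output resources in the order they were loaded
helmCharts:
- name: redis
  repo: https://charts.bitnami.com/bitnami
  releaseName: myredis
```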
---
## Remote bases
- Kustomize can also use bases that are remote git repositories
- Example:
    https://github.com/kubernetes-sigs/kustomize//examples/multibases/?ref=v3.3.1
- See [remoteBuild.md](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/remoteBuild.md) for details about remote targets URL format
- Note that this only works for kustomizations, not individual resources
(the specified repository or directory must contain a `kustomization.yaml` file)
---
## What Kustomize *cannot* do
- By design, there are a number of things that Kustomize won't do
- For instance:
- using command-line arguments or environment variables to generate a variant
- overlays can only *add* resources, not *remove* them¹
- See the full list of [eschewed features](https://kubectl.docs.kubernetes.io/faq/kustomize/eschewedfeatures/) for more details
.footnote[¹That's actually not true; patches can remove resources.]
---
## Changing image references
- We're going to see a few different ways to change image references
- Let's assume that our app uses multiple images:
`redis`, `dockercoins/hasher`, `dockercoins/worker`...
- We want to update the `dockercoins/*` images to use a registry mirror
(e.g. `ghcr.io/dockercoins/*`)
- We don't want to touch the other images
---
## Changing images with the CLI
We can use the following CLI command:
```bash
kustomize edit add label app.kubernetes.io/name:dockercoins
kustomize edit set image name=[newName][:newTag][@digest]
```
---
- `[]` denote optional parameters
## Use cases for labels
- Example: clean up components that have been removed from the kustomization
- Assuming that `commonLabels` have been set as shown on the previous slide:
```bash
kubectl apply -k . --prune --selector app.kubernetes.io/name=dockercoins
```
- ... This command removes resources that have been removed from the kustomization
- Technically, resources with:
- a `kubectl.kubernetes.io/last-applied-configuration` annotation
- labels matching the given selector
---
## Scaling
Instead of using a patch, scaling can be done like this:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
...
replicas:
- name: worker
  count: 5
```
or the CLI equivalent:
- `:` and `@` are the delimiters used to indicate a field
Examples:
```bash
kustomize edit set replicas worker=5
kustomize edit set image dockercoins/worker=ghcr.io/dockercoins/worker
kustomize edit set image dockercoins/worker=ghcr.io/dockercoins/worker:v0.2
kustomize edit set image dockercoins/worker=:v0.2
```
It will automatically work with Deployments, ReplicaSets, StatefulSets.
(For other resource types, fall back to a patch.)
This will add entries in the `images:` section of the kustomization.
---
## Updating images
## Changing images with `images:`
Instead of using patches, images can be changed like this:
Here are a few examples of the `images:` Kustomization directive:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
@@ -397,31 +445,28 @@ images:
digest: sha256:24a0c4b4a4c0eb97a1aabb8e29f18e917d05abfe1b7a7c07857230879ce7d3d3
```
---
## `images:` in practice
This is what we would need for our app:
```yaml
- name: dockercoins/hasher
  newName: ghcr.io/dockercoins/hasher
- name: dockercoins/rng
  newName: ghcr.io/dockercoins/rng
- name: dockercoins/webui
  newName: ghcr.io/dockercoins/webui
- name: dockercoins/worker
  newName: ghcr.io/dockercoins/worker
```
It works, but requires two lines per image. Can we do better? 🤔
---
## Updating images with the CLI
To add an entry in the `images:` section of the kustomization:
```bash
kustomize edit set image name=[newName][:newTag][@digest]
```
- `[]` denote optional parameters
- `:` and `@` are the delimiters used to indicate a field
Examples:
```bash
kustomize edit set image dockercoins/worker=ghcr.io/dockercoins/worker
kustomize edit set image dockercoins/worker=ghcr.io/dockercoins/worker:v0.2
kustomize edit set image dockercoins/worker=:v0.2
```
---
## Updating images, pros and cons
## `images:`, pros and cons
- Very convenient when the same image appears multiple times
@@ -435,70 +480,143 @@ kustomize edit set image dockercoins/worker=:v0.2
- Only patches "well-known" image fields (won't work with CRDs referencing images)
- Helm can deal with these scenarios, for instance:
```yaml
image: {{ .Values.registry }}/worker:{{ .Values.version }}
```
- If our app uses 4 images, we'll need 4 entries in the `images:` section of the Kustomization file
---
## Advanced resource patching
## `PrefixSuffixTransformer`
The example below shows how to:
- Internally, the `namePrefix` directive relies on a `PrefixSuffixTransformer`
- patch multiple resources with a selector (new in Kustomize 3.1)
- use an inline patch instead of a separate patch file (new in Kustomize 3.2)
- By default, that transformer acts on `metadata.name`
- It can be invoked manually and configured to act on other fields
(for instance, `spec.template.spec.containers.image` in a Deployment manifest)
---
## `PrefixSuffixTransformer` in action
```yaml
@@INCLUDE[k8s/kustomize-examples/registry-with-prefix-transformer/microservices/kustomization.yaml]
```
- However, this will transform **all** Deployments!
- Or rather, all resources with a `spec.template.spec.containers.image` field
---
## Limiting `PrefixSuffixTransformer`
<!-- ##VERSION## -->
- `PrefixSuffixTransformer` applies to *all* resources
(as of Kustomize 5.7 (October 2025), there is no way to specify a filter)
- One workaround:
- break down the app in multiple Kustomizations
- in one Kustomization, put the components that need to be transformed
- put the other components in another Kustomization
- Not great if multiple transformations need to be applied on different resources
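A sketch of that workaround: the transformer lives in the `microservices` Kustomization, the `redis` resources live in another one, and a top-level Kustomization pulls in both:
```yaml
# top-level kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices   # the PrefixSuffixTransformer is applied in there
- redis           # these resources are left untouched
```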
---
## `replacements`
- Can entirely replace a field
- Can also replace *part* of a field (using some delimiter, e.g. `/` for images)
- Replacements can apply to *selected* resources
- Let's see an example!
---
## `replacements` in action
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
...
patches:
- patch: |-
    - op: replace
      path: /spec/template/spec/containers/0/image
      value: alpine
  target:
    kind: Deployment
    labelSelector: "app"
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
  targets:
  - select:
      kind: Deployment
      labelSelector: "app in (hasher,rng,webui,worker)"
    fieldPaths:
    - spec.template.spec.containers.*.image
    options:
      delimiter: "/"
      index: 0
```
(This replaces all images of Deployments matching the `app` selector with `alpine`.)
(Note the different `fieldPath` format, compared to the earlier transformer!)
---
## Advanced resource patching, pros and cons
## Discussion
- Very convenient to patch an arbitrary number of resources
- There are multiple ways to rewrite image references to use a registry mirror
- Very convenient to patch any kind of resource, including CRDs
- They all have pros and cons
- Doesn't support "fine-grained" patching (e.g. image registry or tag)
- Main problem: they require enumerating the resources that we want to transform
- Once again, Helm can do it:
```yaml
image: {{ .Values.registry }}/worker:{{ .Values.version }}
```
(or to split them in a separate Kustomization)
- No (easy?) way to do something like:
"Replace all images starting by `dockercoins/` with `ghcr.io/dockercoins/`"
---
## Differences with Helm
## Inconsistencies
- Helm charts generally require more upfront work
- Sometimes it's possible to filter / select resources, sometimes not
(while kustomize "bases" are standard Kubernetes YAML)
- When it's possible, it's not always done the same way
- ... But Helm charts are also more powerful; their templating language can:
- Field paths are also different in different places:
- conditionally include/exclude resources or blocks within resources
`/spec/template/spec/containers/0/image` in JSON patches
- generate values by concatenating, hashing, transforming parameters
`spec.template.spec.containers.0.image` in replacements' `fieldPaths`
- generate values or resources by iteration (`{{ range ... }}`)
`spec/template/spec/containers/image` in transformers' `fieldSpecs`
- access the Kubernetes API during template evaluation
`.spec.template.spec.containers[].image` with tools like `jq`
- [and much more](https://helm.sh/docs/chart_template_guide/)
- `fieldPaths` also have interesting extensions, like:
`spec.template.spec.containers.[name=hello].image`
---
## Conclusions
- It's possible to do a lot of transformations with Kustomize
- In complex scenarios, it can quickly become a maintenance nightmare
- One possible strategy:
- keep each Kustomization as simple as possible
- compose multiple Kustomizations together
- See [this kustomization][ollama-with-sidecar] for a creative example with sidecars and more!
[ollama-with-sidecar]: https://github.com/jpetazzo/container.training/blob/main/k8s/admission-configuration.yaml
???

slides/k8s/templating.md Normal file

@@ -0,0 +1,272 @@
# Manifest Templating
- In the Kubernetes ecosystem, we often use tools like Helm or Kustomize
- These tools are deeply integrated with CD solutions like Flux or Argo
- What do Helm and Kustomize do?
- When do we need to learn them?
- What's the difference between them?
---
## A typical Kubernetes learning curve
1. Create resources with one-line commands
(`kubectl run`, `kubectl create deployment`, `kubectl expose`...)
2. Author YAML manifests to describe these resources
3. Create resources with `kubectl apply -f`, `kubectl create -f`...
4. Combine multiple resources in a single YAML file
(making it convenient to deploy entire stacks)
5. Tweak these YAML manifests to adapt them between dev, prod, etc.
(e.g.: number of replicas, image version to use, features to enable...)
*In this section, we're going to talk about step 5 specifically!*
---
## How can we tweak YAML manifests?
- Standard UNIX tools
(e.g.: `sed`, `envsubst`... after all, YAML manifests are just text!)
- Tools designed to evaluate text templates
(e.g.: [gomplate]...)
- Tools designed to manipulate structured data like JSON or YAML
(e.g.: [jsonnet], [CUE], [ytt]...)
- Tools designed specifically to handle Kubernetes manifests
(e.g.: Helm, Kustomize...)
[gomplate]: https://github.com/hairyhenderson/gomplate
[jsonnet]: https://jsonnet.org/
[CUE]: https://github.com/cue-labs/cue-by-example/tree/main/003_kubernetes_tutorial
[ytt]: https://carvel.dev/ytt/
---
## Standard UNIX tools - `sed`
- Create YAML files with placeholders, e.g.:
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: front
spec:
  rules:
  - host: HOSTNAME
  ...
```
- Replace the placeholders:
```bash
sed 's/HOSTNAME/www.example.com/g' < ingress.yaml | kubectl apply -f-
```
- Placeholders can be delimited to avoid ambiguity (e.g. use `@@HOSTNAME@@`)
---
## Standard UNIX tools - `envsubst`
- Create YAML files with environment variables, e.g.:
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: front
spec:
  rules:
  - host: ${HOSTNAME}
  ...
```
- Evaluate the environment variables:
```bash
HOSTNAME=www.example.com envsubst < ingress.yaml | kubectl apply -f-
```
- Very convenient in pipelines where our variables are already in the environment!
---
## Text templating tools
- These are very rarely used
- ...Because Helm already relies on a text templating engine!
- Other engines are usually very similar to Helm's
(in other words: they wouldn't provide enough value vs. Helm)
- Helm has a lot of interesting Kubernetes-specific features
- So if we want to do text templating, we'll likely use Helm
---
## Tools like jsonnet, CUE, ytt
- Jsonnet: generic tool to manipulate JSON data structures
*was popular in the early days of Kubernetes, before the rise of Helm and Kustomize*
- CUE: generic engine for data validation, templating, configuration...
*powerful, but steeper learning curve*
- ytt: YAML Templating Tool
*interesting for all YAML manifests (not just Kubernetes)*
- Some teams are using these tools; feel free to have a look at them!
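To give an idea, here is a tiny (hypothetical) ytt template; the `#@` lines are evaluated, and `data.values.hostname` would come from a values file:
```yaml
#@ load("@ytt:data", "data")
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: front
spec:
  rules:
  - host: #@ data.values.hostname
```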
---
## Helm
*Combines multiple elements!*
- Text templating engine
(based on Go's [text/template] + [Sprig] + other Kubernetes-specific additions)
- Templates can use "values"
(input parameters that can be provided e.g. in a structured YAML file)
- Helm will manage application lifecycle, like a package manager
(=apply manifests, keep track of what's installed, provide uninstall/rollback...)
- Huge library of Helm "charts" available through the [Artifact Hub]
[text/template]: https://pkg.go.dev/text/template
[Sprig]: https://masterminds.github.io/sprig/
[Artifact Hub]: https://artifacthub.io/
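As a taste of that templating engine, a chart fragment like this (names and values made up) gets rendered by `helm template` or `helm install`:
```yaml
# templates/ingress.yaml (excerpt)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-front
spec:
  rules:
  - host: {{ .Values.hostname | quote }}
```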
---
## Kustomize
- "Kubernetes native configuration management"
- Apply transformations to existing resources
(keep YAML manifests as-is instead of "templatizing" them)
- Manipulate data structures, not YAML text representations
- Integrated with `kubectl` (with `kubectl ... -k`)
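A minimal sketch (file names made up): a `kustomization.yaml` that consumes existing manifests and transforms them without editing them, deployed with `kubectl apply -k .`:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
namePrefix: dev-
commonLabels:
  env: dev
```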
---
## Helm vs Kustomize
- Installing Helm charts:
- huge library available on the Artifact Hub
- relatively easy to get started
- hard to tweak
- Authoring Helm charts:
- manipulating YAML text representations = 💩
- but (almost) everything is possible!
- complex setups can take a lot of work
- Kustomize:
- easy to get started
- doesn't require rewriting YAML manifests
- can apply (almost) arbitrary patches to resources
---
## Which one is best?
- Both have their use-cases
- Plain YAML is great for simple scenarios
(when there isn't anything to configure / tweak)
- Helm is great for complex situations
(lots of settings and/or settings with deep cross-cutting changes)
- Kustomize is great when we can't/won't use a Helm chart
(3rd party software without a Helm chart; or if we don't want to write one)
---
## Do we need to learn both?
*Personal recommendations / suggestions...*
- Learn how to *install* Helm charts
(a lot of Kubernetes software is available that way)
- Learn how to do basic stuff with Kustomize
(e.g. apply simple patches / replacements on existing YAML)
- If you want to distribute software for Kubernetes: probably learn to *write* charts
(a lot of people will expect your stuff to be installable / configurable with Helm)
- For internal use: pick either Helm or Kustomize and learn it well!
(check which one will work best for you depending on your use-case)
---
class: extra-details
## What about Terraform, Ansible...?
- Can we use "classic" tooling with Kubernetes?
- Yes!
- Example: Terraform / OpenTofu have "providers" for Kubernetes
([opentofu/kubernetes], [opentofu/helm])
- This lets you write HCL instead of YAML
- Kubernetes resources can integrate nicely with other TF resources
- This is great if you are already very invested in TF
- Also convenient if you're already managing e.g. secrets with TF
- Similar situation with Ansible and other tools
[opentofu/kubernetes]: https://search.opentofu.org/provider/opentofu/kubernetes/latest
[opentofu/helm]: https://search.opentofu.org/provider/opentofu/helm/latest
???
:EN:- Beyond static YAML
:EN:- Comparison of YAML templating tools
:FR:- Au-delà du YAML statique
:FR:- Analyse d'outils de production de YAML


@@ -46,6 +46,7 @@ content:
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- #4
- k8s/templating.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md


@@ -86,6 +86,7 @@ content:
#- k8s/healthchecks-more.md
#- k8s/record.md
#- k8s/ingress-tls.md
#- k8s/templating.md
#- k8s/kustomize.md
#- k8s/helm-intro.md
#- k8s/helm-chart-format.md


@@ -81,6 +81,7 @@ content:
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/templating.md
#- k8s/kustomize.md
#- k8s/ytt.md
#- k8s/netpol.md


@@ -89,6 +89,7 @@ content:
- k8s/gateway-api.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/templating.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md


@@ -85,6 +85,7 @@ content:
#- k8s/ingress-canary.md
#- k8s/ingress-tls.md
#- k8s/gateway-api.md
- k8s/templating.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md