Compare commits


1 Commit

Author SHA1 Message Date
Jérôme Petazzoni
0cb13b62cb Update README.md 2022-02-02 17:01:23 +01:00
591 changed files with 12949 additions and 32518 deletions

View File

@@ -1,26 +0,0 @@
{
"name": "container.training environment to get started with Docker and/or Kubernetes",
"image": "ghcr.io/jpetazzo/shpod",
"features": {
//"ghcr.io/devcontainers/features/common-utils:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [],
//"postCreateCommand": "... install extra packages...",
"postStartCommand": "dind.sh ; kind.sh",
// This lets us use "docker-outside-docker".
// Unfortunately, minikube, kind, etc. don't work very well that way;
// so for now, we'll likely use "docker-in-docker" instead (with a
// privileged container). But we're still exposing that socket in case
// someone wants to do something interesting with it.
"mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
// This is for docker-in-docker.
"privileged": true,
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
"remoteUser": "k8s"
}
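Side note on the "docker-outside-docker" mount above: the host's Docker socket is bind-mounted into the dev container at /var/run/docker-host.sock. A minimal sketch of how that socket could be used from inside the container, assuming the docker CLI is installed there (the socket path comes from the "mounts" entry above; the rest is illustrative):

export DOCKER_HOST=unix:///var/run/docker-host.sock
docker ps    # talks to the host's Docker daemon through the bind-mounted socket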

23
.gitignore vendored
View File

@@ -2,23 +2,23 @@
*.swp
*~
**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
prepare-tf/.terraform*
prepare-tf/terraform.*
prepare-tf/stage2/*.tf
prepare-tf/stage2/kubeconfig.*
prepare-tf/stage2/.terraform*
prepare-tf/stage2/terraform.*
prepare-tf/stage2/externalips.*
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules
### macOS ###
@@ -32,4 +32,3 @@ node_modules
Thumbs.db
ehthumbs.db
ehthumbs_vista.db

View File

@@ -1,5 +1,7 @@
# Container Training
(Test for livecycle)
This repository (formerly known as `orchestration-workshop`)
contains materials (slides, scripts, demo app, and other
code samples) used for various workshops, tutorials, and

View File

@@ -1,3 +1,2 @@
hostname frr
ip nht resolve-via-default
log stdout

View File

@@ -2,36 +2,30 @@ version: "3"
services:
bgpd:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine

View File

@@ -48,25 +48,20 @@ k8s_yaml('../k8s/dockercoins.yaml')
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires setting up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.
# This will run an ngrok tunnel to expose Tilt to the outside world.
# This is intended to be used when Tilt runs on a remote machine.
local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
# [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
# This will wait until the ngrok tunnel is up, and show its URL to the user.
# We send the output to /dev/tty so that it doesn't get intercepted by
# Tilt, and gets displayed to the user's terminal instead.
# Note: this assumes that the ngrok instance will be running on port 4040.
# If you have other ngrok instances running on the machine, this might not work.
local_resource(name='ngrok:showurl', cmd='''
while sleep 1; do
TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
[ "$TUNNELS" ] && break
done
printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
'''
)

View File

@@ -1,24 +1,26 @@
services:
version: "2"
services:
rng:
build: rng
ports:
- "8001:80"
- "8001:80"
hasher:
build: hasher
ports:
- "8002:80"
- "8002:80"
webui:
build: webui
ports:
- "8000:80"
- "8000:80"
volumes:
- "./webui/files/:/files/"
- "./webui/files/:/files/"
redis:
image: redis
worker:
build: worker

View File

@@ -1,8 +1,7 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install sinatra
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80

View File

@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'
set :bind, '0.0.0.0'
set :port, 80
post '/' do

View File

@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80

View File

@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
app.run(port=80)
app.run(host="0.0.0.0", port=80, threaded=False)

View File

@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
FROM node:4-slim
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
RUN npm install redis@3
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80

View File

@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';
var client = await createClient({
url: "redis://redis",
socket: {
family: 0
}
})
.on("error", function (err) {
console.error("Redis error", err);
})
.connect();
var express = require('express');
var app = express();
var redis = require('redis');
app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
console.error("Redis error", err);
});
app.get('/', function (req, res) {
res.redirect('/index.html');
});
app.get('/json', async(req, res) => {
var coins = await client.hLen('wallet');
var hashes = await client.get('hashes');
var now = Date.now() / 1000;
res.json({
coins: coins,
hashes: hashes,
now: now
app.get('/json', function (req, res) {
client.hlen('wallet', function (err, coins) {
client.get('hashes', function (err, hashes) {
var now = Date.now() / 1000;
res.json( {
coins: coins,
hashes: hashes,
now: now
});
});
});
});

View File

@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
data:
use-forwarded-headers: true
compute-full-forwarded-for: true
use-proxy-protocol: true

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: ingress-nginx

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
target:
kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
target:
kind: Service

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
annotations:
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: true
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: true

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: kyverno

View File

@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: flux-multi-tenancy
spec:
validationFailureAction: enforce
rules:
- name: serviceAccountName
exclude:
resources:
namespaces:
- flux-system
match:
resources:
kinds:
- Kustomization
- HelmRelease
validate:
message: ".spec.serviceAccountName is required"
pattern:
spec:
serviceAccountName: "?*"
- name: kustomizationSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- Kustomization
preconditions:
any:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"
- name: helmReleaseSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- HelmRelease
preconditions:
any:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
namespace: monitoring
spec:
ingressClassName: nginx
rules:
- host: grafana.test.metal.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80

View File

@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
app: web
ingress:
- from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-db
spec:
podSelector:
matchLabels:
app: db
ingress:
- from:
- podSelector:
matchLabels:
app: web

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: openebs

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
files:
- values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml

View File

@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
version: v1
fieldSpecs:
- path: spec/valuesFrom/name
kind: HelmRelease

View File

@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
# --set engines.replicated.mayastor.enabled=false
# --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
# --create-namespace
engines:
replicated:
mayastor:
enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
lvmNode:
kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
hostpathClass:
isDefaultClass: true

View File

@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
namespace: rocky-test
name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rocky-pv-access
rules:
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
toolkit.fluxcd.io/tenant: rocky
name: rocky-reconciler2
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: gotk:rocky-test:reconciler
- kind: ServiceAccount
name: rocky
namespace: rocky-test

View File

@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rocky
namespace: rocky-test
spec:
ingressClassName: nginx
rules:
- host: rocky.test.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web
port:
number: 80

View File

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
target:
kind: Kustomization

View File

@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: rocky
namespace: rocky-test
spec:
path: ./k8s/plain

View File

@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 1
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- name: "80"
port: 80
selector:
app: blue

View File

@@ -9,273 +9,377 @@ metadata:
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
data:
---
apiVersion: rbac.authorization.k8s.io/v1
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: http
port: 443
targetPort: http
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: NodePort
ports:
- port: 443
targetPort: http
name: http
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- args:
- --namespace=kubernetes-dashboard
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.7.0
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --metrics-provider=none
- --enable-skip-login
- --enable-insecure-login
ports:
- name: http
containerPort: 9090
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 9090
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 9090
name: http
protocol: TCP
resources:
limits:
cpu: 2
@@ -288,42 +392,102 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

View File

@@ -9,272 +9,376 @@ metadata:
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
data:
---
apiVersion: rbac.authorization.k8s.io/v1
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: ClusterIP
ports:
- port: 443
targetPort: https
name: https
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --metrics-provider=none
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
@@ -287,39 +391,99 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -9,272 +9,376 @@ metadata:
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
data:
---
apiVersion: rbac.authorization.k8s.io/v1
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: NodePort
ports:
- port: 443
targetPort: https
name: https
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --metrics-provider=none
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
@@ -287,42 +391,102 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -344,12 +508,3 @@ metadata:
creationTimestamp: null
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: cluster-admin-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: cluster-admin

View File

@@ -16,7 +16,8 @@ spec:
hostPath:
path: /root
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
@@ -26,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx

View File

@@ -1,5 +1,5 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
apiVersion: autoscaling/v2beta2
metadata:
name: rng
spec:

View File

@@ -1,12 +0,0 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
kind: Deployment
apiVersion: apps/v1
metadata:
name: haproxy

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup

View File

@@ -1,20 +0,0 @@
global
#log stdout format raw local0
#daemon
maxconn 32
defaults
#log global
timeout client 1h
timeout connect 1h
timeout server 1h
mode http
option abortonclose
frontend metrics
bind :9000
http-request use-service prometheus-exporter
frontend ollama_frontend
bind :8000
default_backend ollama_backend
maxconn 16
backend ollama_backend
server ollama_server localhost:11434 check

View File

@@ -1,39 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: haproxy
name: haproxy
spec:
selector:
matchLabels:
app: haproxy
template:
metadata:
labels:
app: haproxy
spec:
volumes:
- name: haproxy
configMap:
name: haproxy
containers:
- image: haproxy:3.0
name: haproxy
volumeMounts:
- name: haproxy
mountPath: /usr/local/etc/haproxy
readinessProbe:
httpGet:
port: 9000
ports:
- name: haproxy
containerPort: 8000
- name: metrics
containerPort: 9000
resources:
requests:
cpu: 0.05
limits:
cpu: 1

View File

@@ -1,75 +0,0 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed us to append or
# merge lists in "replacements"; but it doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed us to perform
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable so we decided to not do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
files:
- haproxy.cfg
replacements:
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.volumes.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.volumes.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.containers.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
targets:
- select:
kind: Service
name: ollama
fieldPaths:
- spec.ports.[name=11434].targetPort
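# Side note (illustrative sketch, not part of this component): on Kubernetes
# 1.28+, the "real sidecar" mentioned above would presumably look roughly like
# this in the ollama pod template (an init container that keeps running
# alongside the app container):
#
#   initContainers:
#   - name: haproxy
#     image: haproxy:3.0
#     restartPolicy: Always
#     volumeMounts:
#     - name: haproxy
#       mountPath: /usr/local/etc/haproxy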

View File

@@ -1,34 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 2
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- port: 80
selector:
app: blue

View File

@@ -1,94 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
literals:
- "model=gemma3:270m"
- "prompt=If you visit Paris, I suggest that you"
- "queue=4"
name: ollama
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
template:
spec:
containers:
- name: ollama
resources:
requests:
cpu: 1
limits:
cpu: 2
# This will have the same effect, with one small difference:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
# kind: Deployment
# name: ollama
# patch: |
# - op: add
# path: /spec/template/spec/containers/0/resources
# value:
# requests:
# cpu: 1
# limits:
# cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar
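# Usage sketch: the rendered output of this Kustomization can typically be
# inspected with "kubectl kustomize ." (or "kustomize build ."), and applied
# with "kubectl apply -k ." from this directory.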

View File

@@ -1,73 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ollama
name: ollama
spec:
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
volumes:
- name: ollama
hostPath:
path: /opt/ollama
type: DirectoryOrCreate
containers:
- image: ollama/ollama
name: ollama
env:
- name: OLLAMA_MAX_QUEUE
valueFrom:
configMapKeyRef:
name: ollama
key: queue
- name: MODEL
valueFrom:
configMapKeyRef:
name: ollama
key: model
volumeMounts:
- name: ollama
mountPath: /root/.ollama
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- ollama pull $MODEL
livenessProbe:
httpGet:
port: 11434
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ollama show $MODEL
ports:
- name: ollama
containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ollama
name: ollama
spec:
ports:
- name: "11434"
port: 11434
protocol: TCP
targetPort: 11434
selector:
app: ollama
type: ClusterIP

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis

View File

@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
apiVersion: builtin
kind: PrefixSuffixTransformer
metadata:
name: use-ghcr-io
prefix: ghcr.io/
fieldSpecs:
- path: spec/template/spec/containers/image

View File

@@ -1,125 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml

View File

@@ -1,35 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,30 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
targets:
- select:
kind: Deployment
labelSelector: "app in (hasher,rng,webui,worker)"
# It will soon be possible to use regexes in replacement selectors,
# meaning that the "labelSelector:" above can be replaced with the
# following "name:" selector which is a tiny bit simpler:
#name: hasher|rng|webui|worker
# Regex support in replacement selectors was added by this PR:
# https://github.com/kubernetes-sigs/kustomize/pull/5863
# This PR was merged in August 2025, but as of October 2025, the
# latest release of Kustomize is 5.7.1, which was released in July.
# Hopefully the feature will be available in the next release :)
# Another possibility would be to select all Deployments, and then
# reject the one(s) for which we don't want to update the registry;
# for instance:
#reject:
# kind: Deployment
# name: redis
fieldPaths:
- spec.template.spec.containers.*.image
options:
delimiter: "/"
index: 0
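# Worked example: with delimiter "/" and index 0, only the first path segment
# of the image is replaced by the source value, so "dockercoins/hasher:v0.1"
# should become "ghcr.io/dockercoins/hasher:v0.1".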

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: http
operator: In
value: "{{request.object.spec.ports[*].name}}"
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix
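# Illustration (hypothetical names): assuming a Service named "web" in the
# "default" namespace with a port named "http", this policy should generate
# an Ingress exposing it at web.default.A.B.C.D.nip.io, where A.B.C.D is
# presumably meant to be replaced by the ingress controller's address.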

View File

@@ -1,34 +0,0 @@
# Note: this policy uses the operator "AnyIn", which was introduced in Kyverno 1.6.
# (This policy won't work with Kyverno 1.5!)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[*].port}}"
operator: AnyIn
value: [ 80 ]
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
@@ -17,6 +18,5 @@ spec:
operator: NotIn
values: [ red, green, blue ]
validate:
failureAction: Enforce
message: "If it exists, the label color must be red, green, or blue."
deny: {}

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -14,14 +15,13 @@ spec:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color || '' }}"
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color || '' }}"
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be changed."
deny:
conditions:

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -14,13 +15,14 @@ spec:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color || '' }}"
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color || '' }}"
- key: "{{ request.object.metadata.labels.color }}"
operator: Equals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be removed."
deny: {}
deny:
conditions:

View File

@@ -1,14 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz

View File

@@ -1,20 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object

View File

@@ -1,32 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string

View File

@@ -1,39 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string

View File

@@ -1,40 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
enum: [ red, white ]
toppings:
type: array
items:
type: string
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string

View File

@@ -1,45 +0,0 @@
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: margherita
spec:
sauce: red
toppings:
- mozzarella
- basil
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: quatrostagioni
spec:
sauce: red
toppings:
- artichoke
- basil
- mushrooms
- prosciutto
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: mehl31
spec:
sauce: white
toppings:
- goatcheese
- pear
- walnuts
- mozzarella
- rosemary
- honey
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: brownie
spec:
sauce: chocolate
toppings:
- nuts
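# Note: the "brownie" pizza above uses sauce "chocolate", which is not in the
# enum [ red, white ] of the last CRD version; with that CRD applied, creating
# it should presumably be rejected by schema validation.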

View File

@@ -1,13 +0,0 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: my-pdb
spec:
#minAvailable: 2
#minAvailable: 90%
maxUnavailable: 1
#maxUnavailable: 10%
selector:
matchLabels:
app: my-app

View File

@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sysctl
spec:
selector:
matchLabels:
app: sysctl
template:
metadata:
labels:
app: sysctl
spec:
tolerations:
- operator: Exists
initContainers:
- name: sysctl
image: alpine
securityContext:
privileged: true
command:
- sysctl
- fs.inotify.max_user_instances=99999
containers:
- name: pause
image: registry.k8s.io/pause:3.8

k8s/traefik-v1.yaml Normal file
View File

@@ -0,0 +1,87 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

k8s/traefik-v2.yaml Normal file
View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v2.5
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

View File

@@ -1,123 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik
namespace: traefik
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik
namespace: traefik
labels:
app: traefik
spec:
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
name: traefik
spec:
tolerations:
- effect: NoSchedule
operator: Exists
# If, for some reason, our CNI plugin doesn't support hostPort,
# we can enable hostNetwork instead. That should work everywhere
# but it doesn't provide the same isolation.
#hostNetwork: true
serviceAccountName: traefik
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v3.5
name: traefik
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --global.sendAnonymousUsage=true
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

k8s/traefik.yaml Symbolic link
View File

@@ -0,0 +1 @@
traefik-v2.yaml

View File

@@ -5,34 +5,25 @@ banner() {
echo "#"
}
create_namespace() {
namespace() {
# 'helm template --namespace ... --create-namespace'
# doesn't create the namespace, so we need to create it.
# https://github.com/helm/helm/issues/9813
echo ---
kubectl create namespace kubernetes-dashboard \
-o yaml --dry-run=client
echo ---
}
add_namespace() {
# 'helm template --namespace ...' doesn't add namespace information,
# so we do it with this convenient filter instead.
# https://github.com/helm/helm/issues/10737
kubectl create -f- -o yaml --dry-run=client --namespace kubernetes-dashboard
}
(
banner
create_namespace
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set "extraArgs={--enable-skip-login,--enable-insecure-login}" \
--set metricsScraper.enabled=true \
--set protocolHttp=true \
--set service.type=NodePort \
| add_namespace
#
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:insecure \
--clusterrole=cluster-admin \
@@ -43,23 +34,21 @@ add_namespace() {
(
banner
create_namespace
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set metricsScraper.enabled=true \
| add_namespace
#
) > dashboard-recommended.yaml
(
banner
create_namespace
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set metricsScraper.enabled=true \
--set service.type=NodePort \
| add_namespace
#
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:cluster-admin \
--clusterrole=cluster-admin \
@@ -70,15 +59,4 @@ add_namespace() {
kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
-o yaml --dry-run=client \
#
echo ---
cat <<EOF
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: cluster-admin-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: cluster-admin
EOF
) > dashboard-with-token.yaml
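# (Usage sketch, not part of the original script: the generated manifests
# would then typically be applied with e.g.
# "kubectl apply -f dashboard-recommended.yaml" or
# "kubectl apply -f dashboard-with-token.yaml".)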

View File

@@ -1,164 +0,0 @@
#! Define and use variables.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ "{}/hasher:{}".format(repository, tag)
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ "{}/rng:{}".format(repository, tag)
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ "{}/webui:{}".format(repository, tag)
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ "{}/worker:{}".format(repository, tag)
name: worker

View File

@@ -1,167 +0,0 @@
#! Define and use a function to set the deployment image.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
#@ def image(component):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,164 +0,0 @@
#! Define and use functions, demonstrating how to generate labels.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
#@ def image(component):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("redis")
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("redis")
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("rng")
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("rng")
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("webui")
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("webui")
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("worker")
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker
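
The template above defines functions (`image`, `labels`) and calls them from the manifests. A minimal sketch of rendering it and applying the result, assuming ytt and kubectl are available and the template is saved as `dockercoins.yaml` (the file name is an assumption):

```bash
# Render the ytt template to plain YAML, then pipe it to kubectl.
ytt -f dockercoins.yaml | kubectl apply -f -
```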

View File

@@ -1,162 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ def image(component):
#@ return "{}/{}:{}".format(data.values.repository, component, data.values.tag)
#@ end
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("redis")
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("redis")
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("rng")
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("rng")
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("webui")
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("webui")
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("worker")
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1
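
The schema above declares `repository` and `tag` as data values with defaults. A possible way to override them at render time, assuming the template and schema are in the current directory (the values shown are placeholders):

```bash
# Override the data values declared in the schema.
ytt -f . --data-value repository=registry.example.com/dockercoins --data-value tag=v0.2
```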

View File

@@ -1,54 +0,0 @@
---
#@ load("@ytt:data", "data")
---
#@ def Deployment(component, repository=data.values.repository, tag=data.values.tag):
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ component
container.training/generated-by: ytt
name: #@ component
spec:
replicas: 1
selector:
matchLabels:
app: #@ component
template:
metadata:
labels:
app: #@ component
spec:
containers:
- image: #@ repository + "/" + component + ":" + tag
name: #@ component
#@ end
---
#@ def Service(component, port=80, type="ClusterIP"):
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ component
container.training/generated-by: ytt
name: #@ component
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ component
type: #@ type
#@ end
---
--- #@ Deployment("hasher")
--- #@ Service("hasher")
--- #@ Deployment("redis", repository="library", tag="latest")
--- #@ Service("redis", port=6379)
--- #@ Deployment("rng")
--- #@ Service("rng")
--- #@ Deployment("webui")
--- #@ Service("webui", type="NodePort")
--- #@ Deployment("worker")
---

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,56 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
---
#@ def component(name, repository=data.values.repository, tag=data.values.tag, port=None, type="ClusterIP"):
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
replicas: 1
selector:
matchLabels:
app: #@ name
template:
metadata:
labels:
app: #@ name
spec:
containers:
- image: #@ repository + "/" + name + ":" + tag
name: #@ name
#@ if/end port==80:
readinessProbe:
httpGet:
port: #@ port
#@ if port != None:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ name
type: #@ type
#@ end
#@ end
---
--- #@ template.replace(component("hasher", port=80))
--- #@ template.replace(component("redis", repository="library", tag="latest", port=6379))
--- #@ template.replace(component("rng", port=80))
--- #@ template.replace(component("webui", port=80, type="NodePort"))
--- #@ template.replace(component("worker"))
---

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,65 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
---
#@ def component(name, repository, tag, port=None, type="ClusterIP"):
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
replicas: 1
selector:
matchLabels:
app: #@ name
template:
metadata:
labels:
app: #@ name
spec:
containers:
- image: #@ repository + "/" + name + ":" + tag
name: #@ name
#@ if/end port==80:
readinessProbe:
httpGet:
port: #@ port
#@ if port != None:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ name
type: #@ type
#@ end
#@ end
---
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
---
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component(**values))
#@ end
#@ end

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}
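
In this schema, `_default_` carries shared defaults and every other top-level key describes one component. A hedged sketch of overriding nested values from the command line (the flag values are examples):

```bash
# Dotted keys address nested data values; --data-value-yaml parses the value as YAML (here, an integer).
ytt -f . --data-value _default_.tag=v0.2 --data-value-yaml webui.port=8080
```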

View File

@@ -1,26 +0,0 @@
#@ load("@ytt:data", "data")
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
replicas: 1
selector:
matchLabels:
app: #@ data.values.name
template:
metadata:
labels:
app: #@ data.values.name
spec:
containers:
- image: #@ data.values.repository + "/" + data.values.name + ":" + data.values.tag
name: #@ data.values.name
#@ if/end data.values.port==80:
readinessProbe:
httpGet:
port: #@ data.values.port

View File

@@ -1,7 +0,0 @@
#@data/values-schema
---
name: component
repository: dockercoins
tag: v0.1
port: 0
type: ClusterIP

View File

@@ -1,19 +0,0 @@
#@ load("@ytt:data", "data")
#@ if data.values.port > 0:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
ports:
- port: #@ data.values.port
protocol: TCP
targetPort: #@ data.values.port
selector:
app: #@ data.values.name
type: #@ data.values.type
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@
#@ component = library.get("component")
#@
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component.with_data_values(values).eval())
#@ end
#@ end
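
`library.get("component")` loads a private ytt library, which by convention lives in a `_ytt_lib/` directory next to the calling file. The file names below are assumptions; this is just a sketch of the layout the template seems to expect:

```
.
├── top.yaml              (the template above)
├── values.yaml           (the _default_ / per-component schema)
└── _ytt_lib/
    └── component/
        ├── deployment.yaml
        ├── service.yaml
        └── values.yaml
```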

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,26 +0,0 @@
#@ load("@ytt:data", "data")
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
replicas: 1
selector:
matchLabels:
app: #@ data.values.name
template:
metadata:
labels:
app: #@ data.values.name
spec:
containers:
- image: #@ data.values.repository + "/" + data.values.name + ":" + data.values.tag
name: #@ data.values.name
#@ if/end data.values.port==80:
readinessProbe:
httpGet:
port: #@ data.values.port

View File

@@ -1,7 +0,0 @@
#@data/values-schema
---
name: component
repository: dockercoins
tag: v0.1
port: 0
type: ClusterIP

View File

@@ -1,19 +0,0 @@
#@ load("@ytt:data", "data")
#@ if data.values.port > 0:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
ports:
- port: #@ data.values.port
protocol: TCP
targetPort: #@ data.values.port
selector:
app: #@ data.values.name
type: #@ data.values.type
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@
#@ component = library.get("component")
#@
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component.with_data_values(values).eval())
#@ end
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: rng
#@ end
#@overlay/match by=overlay.subset(match())
---
spec:
template:
spec:
containers:
#@overlay/match by="name"
- name: rng
readinessProbe:
httpGet:
#@overlay/match missing_ok=True
path: /1
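
Overlays like this one are passed to ytt alongside the base templates, and ytt applies them to the rendered documents. A minimal sketch, with hypothetical file names:

```bash
# The overlay adds a readinessProbe path to the rng Deployment in the rendered output.
ytt -f dockercoins.yaml -f rng-readiness-overlay.yaml | kubectl apply -f -
```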

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,25 +0,0 @@
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: worker
#@ end
#! This removes the number of replicas:
#@overlay/match by=overlay.subset(match())
---
spec:
#@overlay/remove
replicas:
#! This overrides it:
#@overlay/match by=overlay.subset(match())
---
spec:
#@overlay/match missing_ok=True
replicas: 10
#! Note that it's not necessary to remove the number of replicas.
#! We're just presenting both options here (for instance, you might
#! want to remove the number of replicas if you're using an HPA).

View File

@@ -2,3 +2,4 @@
base = "slides"
publish = "slides"
command = "./build.sh once"

View File

@@ -1,222 +0,0 @@
# Tools to create lab environments
This directory contains tools to create lab environments for Docker and Kubernetes courses and workshops.
It also contains Terraform configurations that can be used stand-alone to create simple Kubernetes clusters.
Assuming that you have installed all the necessary dependencies, and placed cloud provider access tokens in the right locations, you could do, for instance:
```bash
# For a Docker course with 50 students,
# create 50 VMs on Digital Ocean.
./labctl create --students 50 --settings settings/docker.env --provider digitalocean
# For a Kubernetes training with 20 students,
# create 20 clusters of 4 VMs each using kubeadm,
# on a private Openstack cluster.
./labctl create --students 20 --settings settings/kubernetes.env --provider openstack/enix
# For a Kubernetes workshop with 80 students,
# create 80 clusters with 2 VMs each,
# using Scaleway Kapsule (managed Kubernetes).
./labctl create --students 80 --settings settings/mk8s.env --provider scaleway --mode mk8s
```
Interested? Read on!
## Software requirements
For Docker labs and Kubernetes labs based on kubeadm:
- [Parallel SSH](https://github.com/lilydjwg/pssh)
(should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
on a Mac, try `brew install pssh`)
For all labs:
- Terraform
If you want to generate printable cards:
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)
These require Python 3. If you are on a Mac, see below for specific instructions on making Python 3 your default Python. In particular, if you installed `mosh`, Homebrew may have changed your default Python to Python 2.
You will also need an account with the cloud provider(s) that you want to use to deploy the lab environments.
## Cloud provider account(s) and credentials
These scripts create VMs or Kubernetes clusters on cloud providers, so you will need cloud provider account(s) and credentials.
Generally, we try to use the credentials stored in the configuration files used by the cloud providers' CLI tools.
This means, for instance, that for Linode, if you install `linode-cli` and configure it properly, it will place your credentials in `~/.config/linode-cli`, and our Terraform configurations will try to read that file and use the credentials in it.
You don't **have to** install the CLI tools of the cloud provider(s) that you want to use; but we recommend that you do.
If you want to provide your cloud credentials through other means, you will have to adjust the Terraform configuration files in `terraform/provider-config` accordingly.
Here is where we look for credentials for each provider:
- AWS: Terraform defaults; see [AWS provider documentation][creds-aws] (for instance, you can use the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables, or AWS config and profile files)
- Azure: Terraform defaults; see [AzureRM provider documentation][creds-azure] (typically, you can authenticate with the `az` CLI and Terraform will pick it up automatically)
- Civo: CLI configuration file (`~/.civo.json`)
- Digital Ocean: CLI configuration file (`~/.config/doctl/config.yaml`)
- Exoscale: CLI configuration file (`~/.config/exoscale/exoscale.toml`)
- Google Cloud: we're using "Application Default Credentials (ADC)"; run `gcloud auth application-default login`; note that we'll use the default "project" set in `gcloud` unless you set the `GOOGLE_PROJECT` environment variable
- Hetzner: CLI configuration file (`~/.config/hcloud/cli.toml`)
- Linode: CLI configuration file (`~/.config/linode-cli`)
- OpenStack: you will need to write a tfvars file (check [this example](terraform/virtual-machines/openstack/tfvars.example))
- Oracle: Terraform defaults; see [OCI provider documentation][creds-oci] (for instance, you can set up API keys; or you can use a short-lived token generated by the OCI CLI with `oci session authenticate`)
- OVH: Terraform defaults; see [OVH provider documentation][creds-ovh] (this typically involves setting up 5 `OVH_...` environment variables)
- Scaleway: Terraform defaults; see [Scaleway provider documentation][creds-scw] (for instance, you can set environment variables, but it will also automatically pick up CLI authentication from `~/.config/scw/config.yaml`)
[creds-aws]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration
[creds-azure]: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure
[creds-oci]: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformproviderconfiguration.htm#authentication
[creds-ovh]: https://registry.terraform.io/providers/ovh/ovh/latest/docs#provider-configuration
[creds-scw]: https://registry.terraform.io/providers/scaleway/scaleway/latest/docs#authentication
## General Workflow
- fork/clone repo
- make sure your cloud credentials have been configured properly
- run `./labctl create ...` to create lab environments
- run `./labctl destroy ...` when you don't need the environments anymore
## Customizing things
You can edit the `settings/*.env` files, for instance to change the size of the clusters, the login or password used for the students...
Note that these files are sourced before executing any operation on a specific set of lab environments, which means that you can set Terraform variables by adding lines like the following one in the `*.env` files:
```bash
export TF_VAR_node_size=GP1.L
export TF_VAR_location=eu-north
```
## `./labctl` Usage
If you run `./labctl` without arguments, it will show a list of available commands.
### Summary of What `./labctl` Does For You
The script will create a Terraform configuration using a provider-specific template.
There are two modes: `pssh` and `mk8s`.
In `pssh` mode, students connect directly to the virtual machines using SSH.
The Terraform configuration creates a bunch of virtual machines, then the provisioning and configuration are done with `pssh`. There are a number of "steps" that are executed on the VMs, to install Docker, install a number of convenient tools, install and set up Kubernetes (if needed)... The list of "steps" to be executed is configured in the `settings/*.env` file.
In `mk8s` mode, students don't connect directly to the virtual machines. Instead, they connect to an SSH server running in a Pod (using the `jpetazzo/shpod` image), itself running on a Kubernetes cluster. The Kubernetes cluster is a managed cluster created by the Terraform configuration.
## `terraform` directory structure and principles
Legend:
- `📁` directory
- `📄` file
- `📄📄📄` multiple files
- `🌍` Terraform configuration that can be used "as-is"
```
📁terraform
├── 📁list-locations
│ └── 📄📄📄 helper scripts
│ (to list available locations for each provider)
├── 📁many-kubernetes
│ └── 📄📄📄 Terraform configuration template
│ (used in mk8s mode)
├── 📁one-kubernetes
│ │ (contains Terraform configurations that can spawn
│ │ a single Kubernetes cluster on a given provider)
│ ├── 📁🌍aws
│ ├── 📁🌍civo
│ ├── 📄common.tf
│ ├── 📁🌍digitalocean
│ └── ...
├── 📁providers
│ ├── 📁aws
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁azure
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁civo
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁digitalocean
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ └── ...
├── 📁tags
│ │ (contains Terraform configurations + other files
│ │ for a specific set of VMs or K8S clusters; these
│ │ are created by labctl)
│ ├── 📁2023-03-27-10-04-79-jp
│ ├── 📁2023-03-27-10-07-41-jp
│ ├── 📁2023-03-27-10-16-418-jp
│ └── ...
└── 📁virtual-machines
│ (contains Terraform configurations that can spawn
│ a bunch of virtual machines on a given provider)
├── 📁🌍aws
├── 📁🌍azure
├── 📄common.tf
├── 📁🌍digitalocean
└── ...
```
The directory structure can feel a bit overwhelming at first, but it's built with specific goals in mind.
**Consistent input/output between providers.** The per-provider configurations in `one-kubernetes` all take the same input variables, and provide the same output variables. Same thing for the per-provider configurations in `virtual-machines`.
**Don't repeat yourself.** As much as possible, common variables, definitions, and logic have been factored into the `common.tf` file that you can see in `one-kubernetes` and `virtual-machines`. That file is then symlinked in each provider-specific directory, to make sure that all providers use the same version of the `common.tf` file.
**Don't repeat yourself (again).** The things that are specific to each provider have been placed in the `providers` directory, and are shared between the `one-kubernetes` and the `virtual-machines` configurations. Specifically, for each provider, there is `config.tf` (which contains provider configuration, e.g. how to obtain the credentials for that provider) and `variables.tf` (which contains default values like which location and which VM size to use).
**Terraform configurations should work in `labctl` or standalone, without extra work.** The Terraform configurations (identified by 🌍 in the directory tree above) can be used directly. Just go to one of these directories, `terraform init`, `terraform apply`, and you're good to go. But they can also be used from `labctl`. `labctl` shouldn't barf out if you did a `terraform apply` in one of these directories (because it will only copy the `*.tf` files, and leave alone the other files, like the Terraform state).
The latter means that it should be easy to tweak these configurations, or create a new one, without having to use `labctl` to test it. It also means that if you want to use these configurations but don't care about `labctl`, you absolutely can!
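
For instance, to use one of the ready-to-use configurations directly (the digitalocean directory is just an example; any of the 🌍 directories works the same way):

```bash
cd terraform/virtual-machines/digitalocean
terraform init
terraform apply
# ...and later, when you're done with the VMs:
terraform destroy
```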
## Miscellaneous info
### Making sure Python3 is the default (Mac only)
Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.
1) Verify that Python 3 is installed.
```
ls -la /usr/local/Cellar/Python
```
You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.
2) Verify that `python` points to Python3.
```
ls -la /usr/local/bin/python
```
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```
### AWS specific notes
The initial assumption is that you're using a root account. If you'd like to use an IAM user, it will need the right permissions. For `pssh` mode, that includes at least `AmazonEC2FullAccess` and `IAMReadOnlyAccess`.
In `pssh` mode, the Terraform configuration currently uses the default VPC and Security Group. If you want to use another one, you'll have to make changes to `terraform/virtual-machines/aws`.
The default VPC Security Group does not open any ports from the Internet by default, so you'll need to add inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`.
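
One possible way to add those rules with the AWS CLI (the security group ID is a placeholder):

```bash
aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 \
  --protocol tcp --port 22 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 \
  --protocol tcp --port 8000-8002 --cidr 0.0.0.0/0
```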

View File

@@ -1,33 +0,0 @@
#!/bin/sh
case "$1-$2" in
linode-lb)
linode-cli nodebalancers list --json |
jq '.[] | select(.label | startswith("ccm-")) | .id' |
xargs -n1 -P10 linode-cli nodebalancers delete
;;
linode-pvc)
linode-cli volumes list --json |
jq '.[] | select(.label | startswith("pvc")) | .id' |
xargs -n1 -P10 linode-cli volumes delete
;;
digitalocean-lb)
doctl compute load-balancer list --output json |
jq .[].id |
xargs -n1 -P10 doctl compute load-balancer delete --force
;;
digitalocean-pvc)
doctl compute volume list --output json |
jq '.[] | select(.name | startswith("pvc-")) | .id' |
xargs -n1 -P10 doctl compute volume delete --force
;;
scaleway-pvc)
scw instance volume list --output json |
jq '.[] | select(.name | contains("_pvc-")) | .id' |
xargs -n1 -P10 scw instance volume delete
;;
*)
echo "Unknown combination of provider ('$1') and resource ('$2')."
;;
esac
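
A hypothetical invocation of the script above (its file name is an assumption; the two arguments are the provider and the type of leftover resource to delete):

```bash
# Delete leftover load balancers created on Linode:
./cleanup-leftovers.sh linode lb
# Delete leftover persistent volumes on Digital Ocean:
./cleanup-leftovers.sh digitalocean pvc
```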

View File

@@ -1,63 +0,0 @@
#!/bin/sh
#set -eu
if ! command -v http >/dev/null; then
echo "Could not find the 'http' command line tool."
echo "Please install it (the package name might be 'httpie')."
exit 1
fi
. ~/creds/creds.cloudflare.dns
cloudflare() {
case "$1" in
GET|POST|DELETE)
METHOD="$1"
shift
;;
*)
METHOD=""
;;
esac
URI=$1
shift
http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
}
_list_zones() {
cloudflare zones?per_page=100 | jq -r .result[].name
}
_get_zone_id() {
cloudflare zones?name=$1 | jq -r .result[0].id
}
_populate_zone() {
ZONE_ID=$(_get_zone_id $1)
shift
for IPADDR in $*; do
case "$IPADDR" in
*.*) TYPE=A;;
*:*) TYPE=AAAA;;
esac
cloudflare zones/$ZONE_ID/dns_records "name=*" "type=$TYPE" "content=$IPADDR"
cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=$TYPE" "content=$IPADDR"
done
}
_clear_zone() {
ZONE_ID=$(_get_zone_id $1)
for RECORD_ID in $(
cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
); do
cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
done
}
_add_zone() {
cloudflare zones "name=$1"
}
echo "This script is still work in progress."
echo "You can source it and then use its individual functions."

View File

@@ -1,62 +0,0 @@
#!/bin/sh
#
# Baseline resource usage per vcluster in our usecase:
# 500 MB RAM
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
#
# With vspod:
# 800 MB RAM
# 33% CPU
#
set -e
KONKTAG=konk
PROVIDER=linode
STUDENTS=5
case "$PROVIDER" in
linode)
export TF_VAR_node_size=g6-standard-6
export TF_VAR_location=fr-par
;;
scaleway)
export TF_VAR_node_size=PRO2-XS
# For tiny testing purposes, these are okay too:
#export TF_VAR_node_size=PLAY2-NANO
export TF_VAR_location=fr-par-2
;;
esac
# set kubeconfig file
export KUBECONFIG=~/kubeconfig
if [ "$PROVIDER" = "kind" ]; then
kind create cluster --name $KONKTAG
ADDRTYPE=InternalIP
else
if ! [ -f tags/$KONKTAG/stage2/kubeconfig.101 ]; then
./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag $KONKTAG
fi
cp tags/$KONKTAG/stage2/kubeconfig.101 $KUBECONFIG
ADDRTYPE=ExternalIP
fi
# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
while read node address ignoredaddresses; do
kubectl label node $node external_ip=$address
done
# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS
# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
--namespace prom-system --create-namespace \
kube-prometheus-stack kube-prometheus-stack
# and also fix sysctl
kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system

File diff suppressed because it is too large.

View File

@@ -1,7 +0,0 @@
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
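
This containerd configuration enables the systemd cgroup driver for runc. Assuming it is meant to be installed as containerd's main configuration file (the source file name here is an assumption), a typical way to put it in place would be:

```bash
# /etc/containerd/config.toml is containerd's default configuration path.
sudo cp config.toml /etc/containerd/config.toml
sudo systemctl restart containerd
```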

Some files were not shown because too many files have changed in this diff.