Compare commits

..

1 Commit

Author             SHA1         Message      Date
Jerome Petazzoni   8dacf00bf3   Test gitpod  2020-04-03 10:56:39 -05:00
506 changed files with 8426 additions and 81632 deletions

10
.gitignore vendored
View File

@@ -1,19 +1,9 @@
*.pyc
*.swp
*~
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
prepare-tf/.terraform*
prepare-tf/terraform.*
prepare-tf/stage2/*.tf
prepare-tf/stage2/kubeconfig.*
prepare-tf/stage2/.terraform*
prepare-tf/stage2/terraform.*
prepare-tf/stage2/externalips.*
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html

1
.gitpod.yml Normal file
View File

@@ -0,0 +1 @@
image: jpetazzo/shpod

View File

@@ -1,3 +1,2 @@
hostname frr
ip nht resolve-via-default
log stdout

View File

@@ -2,36 +2,30 @@ version: "3"
services:
bgpd:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine
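
For reference, a sketch of how this stack can be exercised, assuming the Compose v1 CLI and that this file is saved as docker-compose.yml (hypothetical name) in the current directory:

# Start the routing daemons, then open an interactive vtysh session
# against the sockets shared through ./run:
docker-compose up -d bgpd zebra
docker-compose run --rm --entrypoint vtysh vtysh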

View File

@@ -9,21 +9,21 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080
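
The CLUSTER placeholder (and the guard line above it) must be edited before Compose will accept this file. A minimal sketch, assuming the file is saved as docker-compose.yaml (hypothetical name):

# Substitute a cluster number and drop the guard line in one pass:
sed -i -e 's/CLUSTER/99/' -e '/placeholder first/d' docker-compose.yaml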

View File

@@ -1,6 +1,3 @@
# Note: hyperkube isn't available after Kubernetes 1.18.
# So we'll have to update this for Kubernetes 1.19!
version: "3"
services:
@@ -12,20 +9,20 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080

View File

@@ -1,72 +0,0 @@
# (1) Setting up a registry, and telling Tilt to use it.
# Tilt needs a registry to store images.
# The following manifest defines a Deployment to run a basic Docker registry,
# and a NodePort Service to access it. Using a NodePort means that we don't
# need to obtain a TLS certificate, because we will be accessing the registry
# through localhost.
k8s_yaml('../k8s/tilt-registry.yaml')
# Tell Tilt to use the registry that we just deployed instead of whatever
# is defined in our Kubernetes resources. Tilt will patch image names to
# use our registry.
default_registry('localhost:30555')
# Create a port forward so that we can access the registry from our local
# environment, too. Note that if you run Tilt directly from a Kubernetes node
# (which is not typical, but might happen in some lab/training environments)
# the following might cause an error because port 30555 is already taken.
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')
# (2) Telling Tilt how to build and run our app.
# The following two lines will use the kubectl-build plugin
# to leverage buildkit and build the images in our Kubernetes
# cluster. This is not enabled by default, because it requires
# the plugin to be installed.
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
# for more information about this plugin.
#load('ext://kubectl_build', 'kubectl_build')
#docker_build = kubectl_build
# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
# how each of these images should be built. The first argument is the name
# of the image, the second argument is the directory containing the build
context (i.e. the directory that contains the Dockerfile).
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
# The following manifest defines five Deployments and four Services for
# our application.
k8s_yaml('../k8s/dockercoins.yaml')
# (3) Finishing touches.
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires setting up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.
# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
# [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
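
A usage sketch for this Tiltfile, assuming tilt is installed and the referenced manifests exist:

# From the directory containing the Tiltfile:
tilt up      # builds the four images, deploys them, serves the UI on port 10350
tilt down    # tears down the resources created by "tilt up"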

View File

@@ -1,6 +1,6 @@
FROM node:4-slim
RUN npm install express
RUN npm install redis@3
RUN npm install redis
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
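
One side of this hunk pins the redis client to major version 3 (an unpinned install can pull in redis 4, whose promise-based API differs from the v3 callback API). A quick check, assuming the build context contains files/ and webui.js:

docker build -t dockercoins/webui .
# Print the redis client version actually installed in the image:
docker run --rm dockercoins/webui node -e 'console.log(require("redis/package.json").version)'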

View File

@@ -13,7 +13,7 @@
color: royalblue;
}
</style>
<script src="jquery-1.11.3.min.js"></script>
<script src="jquery.js"></script>
<script src="d3.min.js"></script>
<script src="rickshaw.min.js"></script>
<script>

1
dockercoins/webui/files/jquery.js vendored Symbolic link
View File

@@ -0,0 +1 @@
jquery-1.11.3.min.js

View File

@@ -1,8 +0,0 @@
k8s_yaml(helm(
"./path-to-chart", name="blue",
values=[], # Example: ["./path/to/values.yaml"]
set=[
"image.repository=jpetazzo/color",
"image.tag=latest",
]
))

View File

@@ -1,16 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
kind: PodSecurityConfiguration
defaults:
enforce: baseline
audit: baseline
warn: baseline
exemptions:
usernames:
- cluster-admin
namespaces:
- kube-system
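
This AdmissionConfiguration is consumed by the API server rather than applied with kubectl. A sketch, assuming the file is stored at /etc/kubernetes/podsecurity.yaml (hypothetical path) on the control plane node:

# Point kube-apiserver at the configuration (all other flags omitted):
kube-apiserver --admission-control-config-file=/etc/kubernetes/podsecurity.yaml ...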

View File

@@ -1,36 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: certbot
spec:
ports:
- port: 80
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: certbot
spec:
rules:
- http:
paths:
- path: /.well-known/acme-challenge/
pathType: Prefix
backend:
service:
name: certbot
port:
number: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: certbot
subsets:
- addresses:
- ip: A.B.C.D
ports:
- port: 8000
protocol: TCP
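
These three objects route /.well-known/acme-challenge/ requests from the cluster to port 8000 on the external address A.B.C.D. One way to use that plumbing, assuming certbot runs directly on that machine (the domain is illustrative):

certbot certonly --standalone --http-01-port 8000 -d something.A.B.C.D.nip.io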

View File

@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: xyz.A.B.C.D.nip.io
spec:
secretName: xyz.A.B.C.D.nip.io
dnsNames:
- xyz.A.B.C.D.nip.io
issuerRef:
name: letsencrypt-staging
kind: ClusterIssuer

View File

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# Remember to update this if you use this manifest to obtain real certificates :)
email: hello@example.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
# To use the production environment, use the following line instead:
#server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: issuer-letsencrypt-staging
solvers:
- http01:
ingress:
class: traefik
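
Once cert-manager picks up the Certificate and ClusterIssuer above, issuance can be followed with standard kubectl commands (names taken from the manifests):

kubectl get clusterissuer letsencrypt-staging
kubectl describe certificate xyz.A.B.C.D.nip.io
# The issued key pair lands in the Secret named by .spec.secretName:
kubectl get secret xyz.A.B.C.D.nip.io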

View File

@@ -1,6 +1,3 @@
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
# for reference, but it's not intended to be used in modern trainings.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@@ -4,13 +4,6 @@ metadata:
name: coffees.container.training
spec:
group: container.training
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
scope: Namespaced
names:
plural: coffees
@@ -18,4 +11,25 @@ spec:
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date

View File

@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
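
With this CRD installed, the additionalPrinterColumns drive the kubectl output. A sketch (file name hypothetical):

kubectl apply -f coffee-crd.yaml
# The TASTE and AGE columns come from the printer columns above:
kubectl get coffees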

View File

@@ -9,9 +9,9 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: excelsa
name: robusta
spec:
taste: fruity
taste: stronger
---
kind: Coffee
apiVersion: container.training/v1alpha1
@@ -23,12 +23,7 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: robusta
name: excelsa
spec:
taste: stronger
bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: java
taste: fruity

View File

@@ -1,78 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"

View File

@@ -1,99 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
volumeMounts:
- name: data
mountPath: /consul/data
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]

View File

@@ -1,15 +1,5 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - the cluster works when scaling down to 1 (thanks to the lifecycle hook)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: consul
rules:
@@ -21,16 +11,22 @@ rules:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
@@ -63,22 +59,20 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "consul:1.6"
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
@@ -86,4 +80,7 @@ spec:
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]
command:
- /bin/sh
- -c
- consul leave

View File

@@ -1,340 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: http
port: 443
targetPort: http
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 9090
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 9090
name: http
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:insecure
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard

View File

@@ -1,325 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume

View File

@@ -1,346 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: kubernetes-dashboard:cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
name: cluster-admin
namespace: kubernetes-dashboard

View File

@@ -5,7 +5,7 @@ metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: fluentd
@@ -21,7 +21,7 @@ rules:
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd
roleRef:

View File

@@ -11,7 +11,7 @@ metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: elasticsearch-operator
@@ -41,7 +41,7 @@ rules:
resources: ["elasticsearchclusters"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: elasticsearch-operator
@@ -55,16 +55,13 @@ subjects:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
spec:
replicas: 1
selector:
matchLabels:
name: elasticsearch-operator
template:
metadata:
labels:

View File

@@ -1,30 +0,0 @@
kind: Event
apiVersion: v1
metadata:
generateName: hello-
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
kind: Node
apiVersion: v1
name: kind-control-plane
# Note: the uid should be the Node name (not the uid of the Node).
# This might be specific to global objects.
uid: kind-control-plane
type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""

View File

@@ -1,36 +0,0 @@
kind: Event
apiVersion: v1
metadata:
# One convention is to use <objectname>.<timestamp>,
# where the timestamp is taken with a nanosecond
# precision and expressed in hexadecimal.
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
name: hello.1234567890
# The label doesn't serve any purpose, except making
# it easier to identify or delete that specific event.
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
### These 5 lines should be updated to refer to an object.
### Make sure to put the correct "uid", because it is what
### "kubectl describe" is using to gather relevant events.
#apiVersion: v1
#kind: Pod
#name: magic-bean
#namespace: blue
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""

View File

@@ -52,7 +52,7 @@ data:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: filebeat
@@ -60,9 +60,6 @@ metadata:
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
@@ -131,7 +128,7 @@ spec:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: filebeat
@@ -144,7 +141,7 @@ roleRef:
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: filebeat

View File

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
namespace: kube-system

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hackthecluster
spec:
selector:
matchLabels:
app: hackthecluster
template:
metadata:
labels:
app: hackthecluster
spec:
volumes:
- name: slash
hostPath:
path: /
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: alpine
image: alpine
volumeMounts:
- name: slash
mountPath: /hostfs
command:
- sleep
- infinity
securityContext:
#privileged: true
capabilities:
add:
- SYS_CHROOT
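
This DaemonSet illustrates a host takeover: the host filesystem is mounted at /hostfs, and CAP_SYS_CHROOT is enough to chroot into it. A sketch of the escape once a pod is running:

kubectl exec -it $(kubectl get pod -l app=hackthecluster -o name | head -1) -- chroot /hostfs sh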

View File

@@ -27,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
- "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx

View File

@@ -1,16 +1,18 @@
global
daemon
maxconn 256
defaults
mode tcp
timeout connect 5s
timeout client 50s
timeout server 50s
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen very-basic-load-balancer
frontend the-frontend
bind *:80
server blue color.blue.svc:80
server green color.green.svc:80
default_backend the-backend
backend the-backend
server google.com-80 google.com:80 maxconn 32 check
server ibm.fr-80 ibm.fr:80 maxconn 32 check
# Note: the services above must exist,
# otherwise HAProxy won't start.
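
That requirement can be verified without starting the proxy, since HAProxy has a configuration check mode (file name hypothetical):

haproxy -c -f haproxy.cfg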

View File

@@ -1,29 +0,0 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta2
metadata:
name: rng
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: rng
minReplicas: 1
maxReplicas: 20
behavior:
scaleUp:
stabilizationWindowSeconds: 60
scaleDown:
stabilizationWindowSeconds: 180
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
kind: Service
name: httplat
metric:
name: httplat_latency_seconds
target:
type: Value
value: 0.1
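
This autoscaler scales the rng Deployment on an Object metric (httplat_latency_seconds, exposed through the httplat Service) with a target of 0.1 seconds. Its decisions can be observed with:

kubectl describe hpa rng      # current vs. target metric values
kubectl get hpa rng --watch   # follows replica count changes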

View File

@@ -1,20 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whatever
port:
number: 1234

View File

@@ -1,17 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

View File

@@ -1 +0,0 @@
ingress-v1beta1.yaml

13
k8s/ingress.yaml Normal file
View File

@@ -0,0 +1,13 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

360
k8s/insecure-dashboard.yaml Normal file
View File

@@ -0,0 +1,360 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0-rc2
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --enable-skip-login
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.2
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: dashboard
name: dashboard
spec:
selector:
matchLabels:
app: dashboard
template:
metadata:
labels:
app: dashboard
spec:
containers:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
image: alpine
name: dashboard
---
apiVersion: v1
kind: Service
metadata:
labels:
app: dashboard
name: dashboard
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: dashboard
type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: insecure-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard

View File

@@ -0,0 +1,162 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
number: 80
path: /
pathType: Prefix
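
With this ClusterPolicy installed, creating any Service should yield a matching Ingress. A quick test, assuming Kyverno's generate controller has permission to create Ingresses:

kubectl create deployment web --image=nginx
kubectl expose deployment web --port=80
# Kyverno should have generated an Ingress named after the Service:
kubectl get ingress web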

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: http
operator: In
value: "{{request.object.spec.ports[*].name}}"
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,34 +0,0 @@
# Note: this policy uses the operator "AnyIn", which was introduced in Kyverno 1.6.
# (This policy won't work with Kyverno 1.5!)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[*].port}}"
operator: AnyIn
value: [ 80 ]
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
context:
- name: configmap
configMap:
name: ingress-domain-name
namespace: "{{request.object.metadata.namespace}}"
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.{{configmap.data.domain}}"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix
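This variant resolves the domain through a ConfigMap named ingress-domain-name in the Service's namespace, so that ConfigMap has to exist before the rule can fire; for instance (the domain value is illustrative):

kubectl create configmap ingress-domain-name --from-literal=domain=1.2.3.4.nip.io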

View File

@@ -1,63 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: setup-namespace
spec:
rules:
- name: setup-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- type: Container
min:
cpu: 0.1
memory: 0.1
max:
cpu: 2
memory: 2Gi
default:
cpu: 0.25
memory: 500Mi
defaultRequest:
cpu: 0.25
memory: 250Mi
- name: setup-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
- name: setup-networkpolicy
match:
resources:
kinds:
- Namespace
generate:
kind: NetworkPolicy
name: default-networkpolicy
namespace: "{{request.object.metadata.name}}"
data:
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
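One way to check that all three generate rules fire (a sketch; the namespace name is arbitrary):

kubectl create namespace quotatest
kubectl get limitrange,resourcequota,networkpolicy --namespace quotatest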

View File

@@ -1,22 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: Exists
- key: color
operator: NotIn
values: [ red, green, blue ]
validate:
message: "If it exists, the label color must be red, green, or blue."
deny: {}

View File

@@ -1,31 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
preconditions:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: ""
validate:
message: "Once label color has been added, it cannot be changed."
deny:
conditions:
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: "{{ request.oldObject.metadata.labels.color }}"

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
preconditions:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color }}"
operator: Equals
value: ""
validate:
message: "Once label color has been added, it cannot be removed."
      deny: {}

View File

@@ -1,46 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: tls-for-ingress
spec:
rules:
- name: create-role
match:
resources:
kinds:
- Certificate
generate:
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
rules:
- verbs:
- get
apiGroups:
- ""
resources:
- secrets
resourceNames:
- "{{request.object.metadata.name}}"
- name: create-rolebinding
match:
resources:
kinds:
- Certificate
generate:
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{request.object.metadata.name}}"
subjects:
- kind: ServiceAccount
name: default
namespace: "{{request.object.metadata.namespace}}"

View File

@@ -1,50 +1,49 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
namespace: local-path-storage
rules:
- apiGroups: [ "" ]
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "endpoints", "persistentvolumes", "pods" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
verbs: ["*"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
namespace: local-path-storage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
@@ -63,28 +62,27 @@ spec:
spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.19
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.8
imagePullPolicy: Always
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
@@ -93,7 +91,6 @@ metadata:
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
@@ -102,59 +99,12 @@ metadata:
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}

View File

@@ -1,61 +1,32 @@
# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# But with the following arguments added to metrics-server:
# args:
# - --kubelet-insecure-tls
# - --metric-resolution=5s
apiVersion: v1
kind: ServiceAccount
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:aggregated-metrics-reader
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
@@ -67,127 +38,101 @@ subjects:
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: Service
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --kubelet-insecure-tls
- --metric-resolution=5s
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir
mountPath: /tmp
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
- --metric-resolution=5s
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
kubernetes.io/name: "Metrics-server"
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system

View File

@@ -1,20 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
generateName: mounter-
labels:
container.training/mounter: ""
spec:
volumes:
- name: pvc
persistentVolumeClaim:
claimName: my-pvc-XYZ45
containers:
- name: mounter
image: alpine
stdin: true
tty: true
volumeMounts:
- name: pvc
mountPath: /pvc
workingDir: /pvc
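Because this manifest uses generateName (and a placeholder claim name), it must go through kubectl create rather than apply; a sketch, assuming it is saved as mounter.yaml and the claimName has been edited:

kubectl create -f mounter.yaml            # 'apply' fails with generateName
kubectl get pods -l container.training/mounter
kubectl attach -ti mounter-xxxxx          # substitute the generated pod name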

View File

@@ -3,7 +3,8 @@ apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
podSelector:
matchLabels:
ingress:
- from:
- podSelector: {}

View File

@@ -14,7 +14,7 @@ spec:
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: openebs-local-hostpath-pod
spec:
volumes:
- name: storage
persistentVolumeClaim:
claimName: local-hostpath-pvc
containers:
- name: better
image: alpine
command:
- sh
- -c
- |
while true; do
echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
sleep $(($RANDOM % 5 + 20))
done
volumeMounts:
- mountPath: /mnt/storage
name: storage

View File

@@ -1,14 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz

View File

@@ -1,20 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object

View File

@@ -1,32 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string

View File

@@ -1,39 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string

View File

@@ -1,40 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
enum: [ red, white ]
toppings:
type: array
items:
type: string
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string
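A sketch of the enum at work: with this CRD installed, the API server should reject a Pizza whose sauce is neither red nor white (the resource below is illustrative; the error text is approximate):

kubectl apply -f - <<EOF
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
  name: brownie
spec:
  sauce: chocolate
  toppings: [ nuts ]
EOF
# ... spec.sauce: Unsupported value: "chocolate": supported values: "red", "white"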

View File

@@ -1,45 +0,0 @@
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: margherita
spec:
sauce: red
toppings:
  - mozzarella
- basil
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: quatrostagioni
spec:
sauce: red
toppings:
- artichoke
- basil
- mushrooms
- prosciutto
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: mehl31
spec:
sauce: white
toppings:
- goatcheese
- pear
- walnuts
- mozzarella
- rosemary
- honey
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: brownie
spec:
sauce: chocolate
toppings:
- nuts

File diff suppressed because it is too large

View File

@@ -22,10 +22,7 @@ spec:
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
containers:
- name: postgres
image: postgres:12
env:
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
image: postgres:11
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres

View File

@@ -1,12 +1,12 @@
---
apiVersion: policy/v1beta1
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
name: restricted
spec:
allowPrivilegeEscalation: false

View File

@@ -1,20 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
generateName: my-pv-
labels:
container.training/pv: ""
spec:
accessModes:
- ReadWriteOnce
- ReadWriteMany
capacity:
storage: 1G
hostPath:
path: /tmp/my-pv
#storageClassName: my-sc
#claimRef:
# kind: PersistentVolumeClaim
# apiVersion: v1
# namespace: default
# name: my-pvc-XYZ45

View File

@@ -1,13 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
generateName: my-pvc-
labels:
container.training/pvc: ""
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1G
#storageClassName: my-sc

View File

@@ -1,147 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: blue
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: blue
name: color
namespace: blue
spec:
selector:
matchLabels:
app: rainbow
color: blue
template:
metadata:
labels:
app: rainbow
color: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: blue
name: color
namespace: blue
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: blue
type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
name: green
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: green
name: color
namespace: green
spec:
selector:
matchLabels:
app: rainbow
color: green
template:
metadata:
labels:
app: rainbow
color: green
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: green
name: color
namespace: green
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: green
type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
name: red
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: red
name: color
namespace: red
spec:
selector:
matchLabels:
app: rainbow
color: red
template:
metadata:
labels:
app: rainbow
color: red
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: red
name: color
namespace: red
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: red
type: ClusterIP

View File

@@ -1,17 +1,28 @@
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
creationTimestamp: null
generation: 1
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
replicas: 1
selector:
matchLabels:
app: socat
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: socat
spec:
@@ -23,19 +34,34 @@ spec:
image: alpine
imagePullPolicy: Always
name: socat
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
externalTrafficPolicy: Cluster
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: socat
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}

View File

@@ -1,42 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: tilt-registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tilt-registry
name: tilt-registry
namespace: tilt-registry
spec:
selector:
matchLabels:
app: tilt-registry
template:
metadata:
labels:
app: tilt-registry
spec:
containers:
- image: registry
name: registry
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tilt-registry
name: tilt-registry
namespace: tilt-registry
spec:
ports:
- port: 5000
protocol: TCP
targetPort: 5000
nodePort: 30555
selector:
app: tilt-registry
type: NodePort
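Since the registry is published on NodePort 30555, images can be pushed through any node; Docker treats localhost registries as insecure by default, so from a node this works without extra configuration (the image name is illustrative):

docker tag myimage localhost:30555/myimage
docker push localhost:30555/myimage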

View File

@@ -1,87 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1,114 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v2.5
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

View File

@@ -1 +0,0 @@
traefik-v2.yaml

103
k8s/traefik.yaml Normal file
View File

@@ -0,0 +1,103 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1,73 +0,0 @@
#!/bin/sh
banner() {
echo "# This file was generated with the script $0."
echo "#"
}
create_namespace() {
# 'helm template --namespace ... --create-namespace'
# doesn't create the namespace, so we need to create it.
# https://github.com/helm/helm/issues/9813
echo ---
kubectl create namespace kubernetes-dashboard \
-o yaml --dry-run=client
echo ---
}
add_namespace() {
# 'helm template --namespace ...' doesn't add namespace information,
# so we do it with this convenient filter instead.
# https://github.com/helm/helm/issues/10737
kubectl create -f- -o yaml --dry-run=client --namespace kubernetes-dashboard
}
(
banner
create_namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set "extraArgs={--enable-skip-login,--enable-insecure-login}" \
--set metricsScraper.enabled=true \
--set protocolHttp=true \
--set service.type=NodePort \
| add_namespace
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:insecure \
--clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:kubernetes-dashboard \
-o yaml --dry-run=client \
#
) > dashboard-insecure.yaml
(
banner
create_namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set metricsScraper.enabled=true \
| add_namespace
) > dashboard-recommended.yaml
(
banner
create_namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set metricsScraper.enabled=true \
--set service.type=NodePort \
| add_namespace
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:cluster-admin \
--clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:cluster-admin \
-o yaml --dry-run=client \
#
echo ---
kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
-o yaml --dry-run=client \
#
) > dashboard-with-token.yaml

View File

@@ -8,24 +8,24 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: user=jean.doe
name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ user=jean.doe ]
resourceNames: [ users:jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: user=jean.doe
name: users:jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: user=jean.doe
name: users:jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe

View File

@@ -3,6 +3,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node2
annotations:
node: node2
spec:
capacity:
storage: 10Gi
@@ -24,6 +26,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node3
annotations:
node: node3
spec:
capacity:
storage: 10Gi
@@ -45,6 +49,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node4
annotations:
node: node4
spec:
capacity:
storage: 10Gi

View File

@@ -1,164 +0,0 @@
#! Define and use variables.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ "{}/hasher:{}".format(repository, tag)
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ "{}/rng:{}".format(repository, tag)
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ "{}/webui:{}".format(repository, tag)
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ "{}/worker:{}".format(repository, tag)
name: worker

View File

@@ -1,167 +0,0 @@
#! Define and use a function to set the deployment image.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
#@ def image(component):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,164 +0,0 @@
#! Define and use functions, demonstrating how to generate labels.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
#@ def image(component):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("redis")
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("redis")
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("rng")
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("rng")
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("webui")
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("webui")
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("worker")
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,162 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ def image(component):
#@ return "{}/{}:{}".format(data.values.repository, component, data.values.tag)
#@ end
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("redis")
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("redis")
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("rng")
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("rng")
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("webui")
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("webui")
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("worker")
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1
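These templates render with ytt; schema values can be overridden on the command line (the file names are illustrative):

ytt -f dockercoins.yaml -f values-schema.yaml --data-value tag=v0.2 | kubectl apply -f -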

View File

@@ -1,54 +0,0 @@
---
#@ load("@ytt:data", "data")
---
#@ def Deployment(component, repository=data.values.repository, tag=data.values.tag):
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ component
container.training/generated-by: ytt
name: #@ component
spec:
replicas: 1
selector:
matchLabels:
app: #@ component
template:
metadata:
labels:
app: #@ component
spec:
containers:
- image: #@ repository + "/" + component + ":" + tag
name: #@ component
#@ end
---
#@ def Service(component, port=80, type="ClusterIP"):
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ component
container.training/generated-by: ytt
name: #@ component
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ component
type: #@ type
#@ end
---
--- #@ Deployment("hasher")
--- #@ Service("hasher")
--- #@ Deployment("redis", repository="library", tag="latest")
--- #@ Service("redis", port=6379)
--- #@ Deployment("rng")
--- #@ Service("rng")
--- #@ Deployment("webui")
--- #@ Service("webui", type="NodePort")
--- #@ Deployment("worker")
---

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,56 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
---
#@ def component(name, repository=data.values.repository, tag=data.values.tag, port=None, type="ClusterIP"):
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
replicas: 1
selector:
matchLabels:
app: #@ name
template:
metadata:
labels:
app: #@ name
spec:
containers:
- image: #@ repository + "/" + name + ":" + tag
name: #@ name
#@ if/end port==80:
readinessProbe:
httpGet:
port: #@ port
#@ if port != None:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ name
type: #@ type
#@ end
#@ end
---
--- #@ template.replace(component("hasher", port=80))
--- #@ template.replace(component("redis", repository="library", tag="latest", port=6379))
--- #@ template.replace(component("rng", port=80))
--- #@ template.replace(component("webui", port=80, type="NodePort"))
--- #@ template.replace(component("worker"))
---

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,65 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
---
#@ def component(name, repository, tag, port=None, type="ClusterIP"):
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
replicas: 1
selector:
matchLabels:
app: #@ name
template:
metadata:
labels:
app: #@ name
spec:
containers:
- image: #@ repository + "/" + name + ":" + tag
name: #@ name
#@ if/end port==80:
readinessProbe:
httpGet:
port: #@ port
#@ if port != None:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ name
type: #@ type
#@ end
#@ end
---
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
---
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component(**values))
#@ end
#@ end

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,26 +0,0 @@
#@ load("@ytt:data", "data")
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
replicas: 1
selector:
matchLabels:
app: #@ data.values.name
template:
metadata:
labels:
app: #@ data.values.name
spec:
containers:
- image: #@ data.values.repository + "/" + data.values.name + ":" + data.values.tag
name: #@ data.values.name
#@ if/end data.values.port==80:
readinessProbe:
httpGet:
port: #@ data.values.port

View File

@@ -1,7 +0,0 @@
#@data/values-schema
---
name: component
repository: dockercoins
tag: v0.1
port: 0
type: ClusterIP

View File

@@ -1,19 +0,0 @@
#@ load("@ytt:data", "data")
#@ if data.values.port > 0:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
ports:
- port: #@ data.values.port
protocol: TCP
targetPort: #@ data.values.port
selector:
app: #@ data.values.name
type: #@ data.values.type
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@
#@ component = library.get("component")
#@
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component.with_data_values(values).eval())
#@ end
#@ end

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,26 +0,0 @@
#@ load("@ytt:data", "data")
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
replicas: 1
selector:
matchLabels:
app: #@ data.values.name
template:
metadata:
labels:
app: #@ data.values.name
spec:
containers:
- image: #@ data.values.repository + "/" + data.values.name + ":" + data.values.tag
name: #@ data.values.name
#@ if/end data.values.port==80:
readinessProbe:
httpGet:
port: #@ data.values.port

View File

@@ -1,7 +0,0 @@
#@data/values-schema
---
name: component
repository: dockercoins
tag: v0.1
port: 0
type: ClusterIP

View File

@@ -1,19 +0,0 @@
#@ load("@ytt:data", "data")
#@ if data.values.port > 0:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
ports:
- port: #@ data.values.port
protocol: TCP
targetPort: #@ data.values.port
selector:
app: #@ data.values.name
type: #@ data.values.type
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@
#@ component = library.get("component")
#@
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component.with_data_values(values).eval())
#@ end
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: rng
#@ end
#@overlay/match by=overlay.subset(match())
---
spec:
template:
spec:
containers:
#@overlay/match by="name"
- name: rng
readinessProbe:
httpGet:
#@overlay/match missing_ok=True
path: /1
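Overlays are just additional inputs to ytt; this one patches the readiness probe of the rng Deployment produced by the other templates (the file names are illustrative):

ytt -f dockercoins/ -f readiness-overlay.yaml | kubectl apply -f -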

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

Some files were not shown because too many files have changed in this diff