mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-03-02 17:30:20 +00:00
Compare commits
85 Commits
2021-06-mb
...
2021-11-de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
549c8f5eaf | ||
|
|
92cd81b170 | ||
|
|
d9e29eb4a4 | ||
|
|
00b167207d | ||
|
|
d34017cff1 | ||
|
|
d53ba51a9f | ||
|
|
90ce84ace3 | ||
|
|
bcbfc747a2 | ||
|
|
2f83b7f256 | ||
|
|
753324cc89 | ||
|
|
97735bafa8 | ||
|
|
faabbb13ae | ||
|
|
10b16ce9e9 | ||
|
|
055c0a304f | ||
|
|
956b079c58 | ||
|
|
603d18ba2d | ||
|
|
ce9fea2b23 | ||
|
|
4b500cd37e | ||
|
|
207845f15a | ||
|
|
f3ee3a923e | ||
|
|
f4837d9e28 | ||
|
|
a3a8c824c8 | ||
|
|
7fc2d5e57f | ||
|
|
b3bc5ccb9b | ||
|
|
fcf0296245 | ||
|
|
cc1340801e | ||
|
|
538d3212e4 | ||
|
|
ec31c1fa17 | ||
|
|
b9041d3d39 | ||
|
|
ce0ae6e35b | ||
|
|
3b9a2113a5 | ||
|
|
b67691c7e7 | ||
|
|
081380fda8 | ||
|
|
e9e3fae21f | ||
|
|
03c0abb182 | ||
|
|
372146a114 | ||
|
|
e7a74769b5 | ||
|
|
2e096d85c7 | ||
|
|
acbe355f1e | ||
|
|
733166fbd7 | ||
|
|
5f0a57477d | ||
|
|
a85c8a3240 | ||
|
|
c9820376ff | ||
|
|
72c02c6fcf | ||
|
|
181844ebea | ||
|
|
79fe6c1f5c | ||
|
|
7180e832fe | ||
|
|
ae74d9069f | ||
|
|
8fed7a8adb | ||
|
|
f9c7be9697 | ||
|
|
d267ca1632 | ||
|
|
c9e93540ba | ||
|
|
f4345d3241 | ||
|
|
97e8106669 | ||
|
|
54b6948eeb | ||
|
|
ce29289bed | ||
|
|
7801fc5131 | ||
|
|
b260ad8482 | ||
|
|
61bd320363 | ||
|
|
47766be4b2 | ||
|
|
fb8efbe29f | ||
|
|
ca0c721ba0 | ||
|
|
1500b5937d | ||
|
|
6e1a9925ea | ||
|
|
b7dd363ccd | ||
|
|
c5cd84e274 | ||
|
|
108f936f84 | ||
|
|
3594fef67a | ||
|
|
021929e50e | ||
|
|
e3fa685ee1 | ||
|
|
4f662d14cc | ||
|
|
d956da1733 | ||
|
|
1b820f3bc1 | ||
|
|
f1d4704b0e | ||
|
|
71423233bd | ||
|
|
b508360227 | ||
|
|
7cd47243ab | ||
|
|
a9d84b01d8 | ||
|
|
4df547d9b1 | ||
|
|
d14f86e683 | ||
|
|
92cdb4146b | ||
|
|
0ca798bc30 | ||
|
|
8025d37188 | ||
|
|
3318ce84e4 | ||
|
|
3e29881ece |
10
.gitignore
vendored
10
.gitignore
vendored
@@ -1,9 +1,19 @@
|
||||
*.pyc
|
||||
*.swp
|
||||
*~
|
||||
|
||||
prepare-vms/tags
|
||||
prepare-vms/infra
|
||||
prepare-vms/www
|
||||
|
||||
prepare-tf/.terraform*
|
||||
prepare-tf/terraform.*
|
||||
prepare-tf/stage2/*.tf
|
||||
prepare-tf/stage2/kubeconfig.*
|
||||
prepare-tf/stage2/.terraform*
|
||||
prepare-tf/stage2/terraform.*
|
||||
prepare-tf/stage2/externalips.*
|
||||
|
||||
slides/*.yml.html
|
||||
slides/autopilot/state.yaml
|
||||
slides/index.html
|
||||
|
||||
@@ -1,39 +1,4 @@
|
||||
k8s_yaml(blob('''
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
name: registry
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
containers:
|
||||
- image: registry
|
||||
name: registry
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
name: registry
|
||||
spec:
|
||||
ports:
|
||||
- port: 5000
|
||||
protocol: TCP
|
||||
targetPort: 5000
|
||||
nodePort: 30555
|
||||
selector:
|
||||
app: registry
|
||||
type: NodePort
|
||||
'''))
|
||||
k8s_yaml('../k8s/tilt-registry.yaml')
|
||||
default_registry('localhost:30555')
|
||||
docker_build('dockercoins/hasher', 'hasher')
|
||||
docker_build('dockercoins/rng', 'rng')
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
color: royalblue;
|
||||
}
|
||||
</style>
|
||||
<script src="jquery.js"></script>
|
||||
<script src="jquery-1.11.3.min.js"></script>
|
||||
<script src="d3.min.js"></script>
|
||||
<script src="rickshaw.min.js"></script>
|
||||
<script>
|
||||
|
||||
1
dockercoins/webui/files/jquery.js
vendored
1
dockercoins/webui/files/jquery.js
vendored
@@ -1 +0,0 @@
|
||||
jquery-1.11.3.min.js
|
||||
8
k8s/Tiltfile.helmchart
Normal file
8
k8s/Tiltfile.helmchart
Normal file
@@ -0,0 +1,8 @@
|
||||
k8s_yaml(helm(
|
||||
"./path-to-chart", name="blue",
|
||||
values=[], # Example: ["./path/to/values.yaml"]
|
||||
set=[
|
||||
"image.repository=jpetazzo/color",
|
||||
"image.tag=latest",
|
||||
]
|
||||
))
|
||||
@@ -7,7 +7,7 @@ spec:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: certbot
|
||||
@@ -16,9 +16,12 @@ spec:
|
||||
- http:
|
||||
paths:
|
||||
- path: /.well-known/acme-challenge/
|
||||
pathType: Prefix
|
||||
backend:
|
||||
serviceName: certbot
|
||||
servicePort: 80
|
||||
service:
|
||||
name: certbot
|
||||
port:
|
||||
number: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
|
||||
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
|
||||
# for reference, but it's not intended to be used in modern trainings.
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
|
||||
@@ -8,6 +8,9 @@ spec:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
# This file is based on the following manifest:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
# It adds the "skip login" flag, as well as an insecure hack to defeat SSL.
|
||||
# As its name implies, it is INSECURE and you should not use it in production,
|
||||
# or on clusters that contain any kind of important or sensitive data, or on
|
||||
# clusters that have a life span of more than a few hours.
|
||||
|
||||
# This file was generated with the script ./update-dashboard-yaml.sh.
|
||||
#
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard
|
||||
spec: {}
|
||||
status: {}
|
||||
---
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -19,93 +25,192 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# kubernetes-dashboard-certs
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# kubernetes-dashboard-csrf
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# kubernetes-dashboard-key-holder
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/configmap.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
data:
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: Role
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: "kubernetes-dashboard-metrics"
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: "kubernetes-dashboard-metrics"
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard-metrics
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/role.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
@@ -124,30 +229,32 @@ rules:
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/rolebinding.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -156,207 +263,237 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
- --enable-skip-login
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: dashboard
|
||||
name: dashboard
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: dashboard
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- sh
|
||||
- -c
|
||||
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
|
||||
image: alpine
|
||||
name: dashboard
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/service.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
app: dashboard
|
||||
name: dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: dashboard
|
||||
type: NodePort
|
||||
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: http
|
||||
name: http
|
||||
selector:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/deployment.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
spec:
|
||||
securityContext:
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: "kubernetesui/dashboard:v2.3.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --namespace=kubernetes-dashboard
|
||||
- --metrics-provider=none
|
||||
- --enable-skip-login
|
||||
- --enable-insecure-login
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9090
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 9090
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsGroup: 2001
|
||||
runAsUser: 1001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/ingress.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/pdb.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/psp.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: insecure-dashboard
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard:insecure
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
# This is a copy of the following file:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
|
||||
# This file was generated with the script ./update-dashboard-yaml.sh.
|
||||
#
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard
|
||||
spec: {}
|
||||
status: {}
|
||||
---
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -15,93 +25,192 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# kubernetes-dashboard-certs
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# kubernetes-dashboard-csrf
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# kubernetes-dashboard-key-holder
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/configmap.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
data:
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: Role
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: "kubernetes-dashboard-metrics"
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: "kubernetes-dashboard-metrics"
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard-metrics
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/role.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
@@ -120,30 +229,32 @@ rules:
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/rolebinding.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -152,154 +263,227 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/service.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
- port: 443
|
||||
targetPort: https
|
||||
name: https
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/deployment.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
securityContext:
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: "kubernetesui/dashboard:v2.3.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --namespace=kubernetes-dashboard
|
||||
- --auto-generate-certificates
|
||||
- --metrics-provider=none
|
||||
ports:
|
||||
- name: https
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsGroup: 2001
|
||||
runAsUser: 1001
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/ingress.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/pdb.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/psp.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
# This file is based on the following manifest:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
# It adds a ServiceAccount that has cluster-admin privileges on the cluster,
|
||||
# and exposes the dashboard on a NodePort. It makes it easier to do quick demos
|
||||
# of the Kubernetes dashboard, without compromising the security too much.
|
||||
|
||||
# This file was generated with the script ./update-dashboard-yaml.sh.
|
||||
#
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard
|
||||
spec: {}
|
||||
status: {}
|
||||
---
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -18,94 +25,192 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# kubernetes-dashboard-certs
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# kubernetes-dashboard-csrf
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
# Source: kubernetes-dashboard/templates/secret.yaml
|
||||
# kubernetes-dashboard-key-holder
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/configmap.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
data:
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: Role
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: "kubernetes-dashboard-metrics"
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: "kubernetes-dashboard-metrics"
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard-metrics
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/role.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
@@ -124,30 +229,32 @@ rules:
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/rolebinding.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
labels:
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -156,176 +263,236 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/service.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
- port: 443
|
||||
targetPort: https
|
||||
name: https
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/deployment.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
helm.sh/chart: kubernetes-dashboard-5.0.2
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/version: "2.3.1"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
securityContext:
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: "kubernetesui/dashboard:v2.3.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --namespace=kubernetes-dashboard
|
||||
- --auto-generate-certificates
|
||||
- --metrics-provider=none
|
||||
ports:
|
||||
- name: https
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsGroup: 2001
|
||||
runAsUser: 1001
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/ingress.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/pdb.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
# Source: kubernetes-dashboard/templates/psp.yaml
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-cluster-admin
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard:cluster-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
@@ -334,3 +501,10 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
@@ -11,11 +11,21 @@ spec:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
preconditions:
|
||||
- key: "{{ request.operation }}"
|
||||
operator: Equals
|
||||
value: UPDATE
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotEquals
|
||||
value: ""
|
||||
- key: "{{ request.object.metadata.labels.color }}"
|
||||
operator: NotEquals
|
||||
value: ""
|
||||
validate:
|
||||
message: "Once label color has been added, it cannot be changed."
|
||||
deny:
|
||||
conditions:
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotEqual
|
||||
value: "{{ request.object.metadata.labels.color }}"
|
||||
- key: "{{ request.object.metadata.labels.color }}"
|
||||
operator: NotEquals
|
||||
value: "{{ request.oldObject.metadata.labels.color }}"
|
||||
|
||||
|
||||
@@ -6,20 +6,23 @@ spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: prevent-color-removal
|
||||
- name: prevent-color-change
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: color
|
||||
operator: DoesNotExist
|
||||
preconditions:
|
||||
- key: "{{ request.operation }}"
|
||||
operator: Equals
|
||||
value: UPDATE
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotEquals
|
||||
value: ""
|
||||
- key: "{{ request.object.metadata.labels.color }}"
|
||||
operator: Equals
|
||||
value: ""
|
||||
validate:
|
||||
message: "Once label color has been added, it cannot be removed."
|
||||
deny:
|
||||
conditions:
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotIn
|
||||
value: []
|
||||
|
||||
|
||||
46
k8s/kyverno-tls-for-ingress.yaml
Normal file
46
k8s/kyverno-tls-for-ingress.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: tls-for-ingress
|
||||
spec:
|
||||
rules:
|
||||
- name: create-role
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Certificate
|
||||
generate:
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
name: "{{request.object.metadata.name}}"
|
||||
namespace: "{{request.object.metadata.namespace}}"
|
||||
data:
|
||||
rules:
|
||||
- verbs:
|
||||
- get
|
||||
apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
resourceNames:
|
||||
- "{{request.object.metadata.name}}"
|
||||
- name: create-rolebinding
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Certificate
|
||||
generate:
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
name: "{{request.object.metadata.name}}"
|
||||
namespace: "{{request.object.metadata.namespace}}"
|
||||
data:
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: "{{request.object.metadata.name}}"
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: "{{request.object.metadata.namespace}}"
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: whatever
|
||||
servicePort: 1234
|
||||
42
k8s/tilt-registry.yaml
Normal file
42
k8s/tilt-registry.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: tilt-registry
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: tilt-registry
|
||||
name: tilt-registry
|
||||
namespace: tilt-registry
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: tilt-registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: tilt-registry
|
||||
spec:
|
||||
containers:
|
||||
- image: registry
|
||||
name: registry
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: tilt-registry
|
||||
name: tilt-registry
|
||||
namespace: tilt-registry
|
||||
spec:
|
||||
ports:
|
||||
- port: 5000
|
||||
protocol: TCP
|
||||
targetPort: 5000
|
||||
nodePort: 30555
|
||||
selector:
|
||||
app: tilt-registry
|
||||
type: NodePort
|
||||
@@ -29,12 +29,15 @@ spec:
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
- image: traefik:v2.5
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: https
|
||||
containerPort: 443
|
||||
hostPort: 443
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
@@ -100,3 +103,12 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: IngressClass
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: traefik
|
||||
annotations:
|
||||
ingressclass.kubernetes.io/is-default-class: "true"
|
||||
spec:
|
||||
controller: traefik.io/ingress-controller
|
||||
|
||||
62
k8s/update-dashboard-yaml.sh
Executable file
62
k8s/update-dashboard-yaml.sh
Executable file
@@ -0,0 +1,62 @@
|
||||
#!/bin/sh
|
||||
|
||||
banner() {
|
||||
echo "# This file was generated with the script $0."
|
||||
echo "#"
|
||||
}
|
||||
|
||||
namespace() {
|
||||
# 'helm template --namespace ... --create-namespace'
|
||||
# doesn't create the namespace, so we need to create it.
|
||||
echo ---
|
||||
kubectl create namespace kubernetes-dashboard \
|
||||
-o yaml --dry-run=client
|
||||
echo ---
|
||||
}
|
||||
|
||||
(
|
||||
banner
|
||||
namespace
|
||||
helm template kubernetes-dashboard kubernetes-dashboard \
|
||||
--repo https://kubernetes.github.io/dashboard/ \
|
||||
--create-namespace --namespace kubernetes-dashboard \
|
||||
--set "extraArgs={--enable-skip-login,--enable-insecure-login}" \
|
||||
--set protocolHttp=true \
|
||||
--set service.type=NodePort \
|
||||
#
|
||||
echo ---
|
||||
kubectl create clusterrolebinding kubernetes-dashboard:insecure \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kubernetes-dashboard:kubernetes-dashboard \
|
||||
-o yaml --dry-run=client \
|
||||
#
|
||||
) > dashboard-insecure.yaml
|
||||
|
||||
(
|
||||
banner
|
||||
namespace
|
||||
helm template kubernetes-dashboard kubernetes-dashboard \
|
||||
--repo https://kubernetes.github.io/dashboard/ \
|
||||
--create-namespace --namespace kubernetes-dashboard \
|
||||
#
|
||||
) > dashboard-recommended.yaml
|
||||
|
||||
(
|
||||
banner
|
||||
namespace
|
||||
helm template kubernetes-dashboard kubernetes-dashboard \
|
||||
--repo https://kubernetes.github.io/dashboard/ \
|
||||
--create-namespace --namespace kubernetes-dashboard \
|
||||
--set service.type=NodePort \
|
||||
#
|
||||
echo ---
|
||||
kubectl create clusterrolebinding kubernetes-dashboard:cluster-admin \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kubernetes-dashboard:cluster-admin \
|
||||
-o yaml --dry-run=client \
|
||||
#
|
||||
echo ---
|
||||
kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
|
||||
-o yaml --dry-run=client \
|
||||
#
|
||||
) > dashboard-with-token.yaml
|
||||
76
prepare-tf/README.md
Normal file
76
prepare-tf/README.md
Normal file
@@ -0,0 +1,76 @@
|
||||
This directory contains a Terraform configuration to deploy
|
||||
a bunch of Kubernetes clusters on various cloud providers, using their respective managed Kubernetes products.
|
||||
|
||||
To use it:
|
||||
|
||||
1. Select the provider you wish to use.
|
||||
|
||||
Change the `source` attribute of the `module "clusters"` section.
|
||||
Check the content of the `modules` directory to see available choices.
|
||||
|
||||
```bash
|
||||
vim main.tf
|
||||
```
|
||||
|
||||
2. Initialize the provider.
|
||||
|
||||
```bash
|
||||
terraform init
|
||||
```
|
||||
|
||||
3. Configure provider authentication.
|
||||
|
||||
- Digital Ocean: `export DIGITALOCEAN_ACCESS_TOKEN=...`
|
||||
(check `~/.config/doctl/config.yaml` for the token)
|
||||
- Linode: `export LINODE_TOKEN=...`
|
||||
(check `~/.config/linode-cli` for the token)
|
||||
- Oracle Cloud: it should use `~/.oci/config`
|
||||
- Scaleway: run `scw init`
|
||||
|
||||
4. Decide how many clusters and how many nodes per clusters you want.
|
||||
|
||||
```bash
|
||||
export TF_VAR_how_many_clusters=5
|
||||
export TF_VAR_min_nodes_per_pool=2
|
||||
# Optional (will enable autoscaler when available)
|
||||
export TF_VAR_max_nodes_per_pool=4
|
||||
# Optional (will only work on some providers)
|
||||
export TF_VAR_enable_arm_pool=true
|
||||
```
|
||||
|
||||
5. Provision clusters.
|
||||
|
||||
```bash
|
||||
terraform apply
|
||||
```
|
||||
|
||||
6. Perform second stage provisioning.
|
||||
|
||||
This will install a SSH server on the clusters.
|
||||
|
||||
```bash
|
||||
cd stage2
|
||||
terraform init
|
||||
terraform apply
|
||||
```
|
||||
|
||||
7. Obtain cluster connection information.
|
||||
|
||||
The following command shows connection information, one cluster per line, ready to copy-paste in a shared document or spreadsheet.
|
||||
|
||||
```bash
|
||||
terraform output -json | jq -r 'to_entries[].value.value'
|
||||
```
|
||||
|
||||
8. Destroy clusters.
|
||||
|
||||
```bash
|
||||
cd ..
|
||||
terraform destroy
|
||||
```
|
||||
|
||||
9. Clean up stage2.
|
||||
|
||||
```bash
|
||||
rm stage/terraform.tfstate*
|
||||
```
|
||||
16
prepare-tf/locals.tf
Normal file
16
prepare-tf/locals.tf
Normal file
@@ -0,0 +1,16 @@
|
||||
resource "random_string" "_" {
|
||||
length = 5
|
||||
special = false
|
||||
upper = false
|
||||
}
|
||||
|
||||
resource "time_static" "_" {}
|
||||
|
||||
locals {
|
||||
tag = format("tf-%s-%s", formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339), random_string._.result)
|
||||
# Common tags to be assigned to all resources
|
||||
common_tags = [
|
||||
"created-by=terraform",
|
||||
"tag=${local.tag}"
|
||||
]
|
||||
}
|
||||
77
prepare-tf/main.tf
Normal file
77
prepare-tf/main.tf
Normal file
@@ -0,0 +1,77 @@
|
||||
module "clusters" {
|
||||
source = "./modules/linode"
|
||||
for_each = local.clusters
|
||||
cluster_name = each.value.cluster_name
|
||||
min_nodes_per_pool = var.min_nodes_per_pool
|
||||
max_nodes_per_pool = var.max_nodes_per_pool
|
||||
enable_arm_pool = var.enable_arm_pool
|
||||
node_size = var.node_size
|
||||
common_tags = local.common_tags
|
||||
}
|
||||
|
||||
locals {
|
||||
clusters = {
|
||||
for i in range(101, 101 + var.how_many_clusters) :
|
||||
i => {
|
||||
cluster_name = format("%s-%03d", local.tag, i)
|
||||
kubeconfig_path = format("./stage2/kubeconfig.%03d", i)
|
||||
#dashdash_kubeconfig = format("--kubeconfig=./stage2/kubeconfig.%03d", i)
|
||||
externalips_path = format("./stage2/externalips.%03d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "stage2" {
|
||||
filename = "./stage2/main.tf"
|
||||
file_permission = "0644"
|
||||
content = templatefile(
|
||||
"./stage2.tmpl",
|
||||
{ clusters = local.clusters }
|
||||
)
|
||||
}
|
||||
|
||||
resource "local_file" "kubeconfig" {
|
||||
for_each = local.clusters
|
||||
filename = each.value.kubeconfig_path
|
||||
file_permission = "0600"
|
||||
content = module.clusters[each.key].kubeconfig
|
||||
}
|
||||
|
||||
resource "local_file" "externalips" {
|
||||
for_each = local.clusters
|
||||
filename = each.value.externalips_path
|
||||
file_permission = "0600"
|
||||
content = data.external.externalips[each.key].result.externalips
|
||||
}
|
||||
|
||||
resource "null_resource" "wait_for_nodes" {
|
||||
for_each = local.clusters
|
||||
provisioner "local-exec" {
|
||||
environment = {
|
||||
KUBECONFIG = local_file.kubeconfig[each.key].filename
|
||||
}
|
||||
command = <<-EOT
|
||||
set -e
|
||||
kubectl get nodes --watch | grep --silent --line-buffered .
|
||||
kubectl wait node --for=condition=Ready --all --timeout=10m
|
||||
EOT
|
||||
}
|
||||
}
|
||||
|
||||
data "external" "externalips" {
|
||||
for_each = local.clusters
|
||||
depends_on = [ null_resource.wait_for_nodes ]
|
||||
program = [
|
||||
"sh",
|
||||
"-c",
|
||||
<<-EOT
|
||||
set -e
|
||||
cat >/dev/null
|
||||
export KUBECONFIG=${local_file.kubeconfig[each.key].filename}
|
||||
echo -n '{"externalips": "'
|
||||
kubectl get nodes \
|
||||
-o 'jsonpath={.items[*].status.addresses[?(@.type=="ExternalIP")].address}'
|
||||
echo -n '"}'
|
||||
EOT
|
||||
]
|
||||
}
|
||||
16
prepare-tf/modules/digitalocean/main.tf
Normal file
16
prepare-tf/modules/digitalocean/main.tf
Normal file
@@ -0,0 +1,16 @@
|
||||
resource "digitalocean_kubernetes_cluster" "_" {
|
||||
name = var.cluster_name
|
||||
tags = local.common_tags
|
||||
region = var.region
|
||||
version = var.k8s_version
|
||||
|
||||
node_pool {
|
||||
name = "dok-x86"
|
||||
tags = local.common_tags
|
||||
size = local.node_type
|
||||
auto_scale = true
|
||||
min_nodes = var.min_nodes_per_pool
|
||||
max_nodes = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
|
||||
}
|
||||
|
||||
}
|
||||
7
prepare-tf/modules/digitalocean/outputs.tf
Normal file
7
prepare-tf/modules/digitalocean/outputs.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
output "kubeconfig" {
|
||||
value = digitalocean_kubernetes_cluster._.kube_config.0.raw_config
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
value = digitalocean_kubernetes_cluster._.id
|
||||
}
|
||||
8
prepare-tf/modules/digitalocean/providers.tf
Normal file
8
prepare-tf/modules/digitalocean/providers.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
digitalocean = {
|
||||
source = "digitalocean/digitalocean"
|
||||
version = "2.12.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
59
prepare-tf/modules/digitalocean/variables.tf
Normal file
59
prepare-tf/modules/digitalocean/variables.tf
Normal file
@@ -0,0 +1,59 @@
|
||||
variable "cluster_name" {
|
||||
type = string
|
||||
default = "deployed-with-terraform"
|
||||
}
|
||||
|
||||
variable "common_tags" {
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
locals {
|
||||
common_tags = [for tag in var.common_tags : replace(tag, "=", "-")]
|
||||
}
|
||||
|
||||
variable "node_size" {
|
||||
type = string
|
||||
default = "M"
|
||||
}
|
||||
|
||||
variable "min_nodes_per_pool" {
|
||||
type = number
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "max_nodes_per_pool" {
|
||||
type = number
|
||||
default = 5
|
||||
}
|
||||
|
||||
# FIXME
|
||||
variable "enable_arm_pool" {
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "node_types" {
|
||||
type = map(string)
|
||||
default = {
|
||||
"S" = "s-1vcpu-2gb"
|
||||
"M" = "s-2vcpu-4gb"
|
||||
"L" = "s-4vcpu-8gb"
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
node_type = var.node_types[var.node_size]
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
type = string
|
||||
default = "ams3"
|
||||
}
|
||||
|
||||
# To view supported versions, run:
|
||||
# doctl kubernetes options versions -o json | jq -r .[].slug
|
||||
variable "k8s_version" {
|
||||
type = string
|
||||
default = "1.21.5-do.0"
|
||||
}
|
||||
16
prepare-tf/modules/linode/main.tf
Normal file
16
prepare-tf/modules/linode/main.tf
Normal file
@@ -0,0 +1,16 @@
|
||||
resource "linode_lke_cluster" "_" {
|
||||
label = var.cluster_name
|
||||
tags = var.common_tags
|
||||
region = var.region
|
||||
k8s_version = var.k8s_version
|
||||
|
||||
pool {
|
||||
type = local.node_type
|
||||
count = var.min_nodes_per_pool
|
||||
autoscaler {
|
||||
min = var.min_nodes_per_pool
|
||||
max = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
7
prepare-tf/modules/linode/outputs.tf
Normal file
7
prepare-tf/modules/linode/outputs.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
output "kubeconfig" {
|
||||
value = base64decode(linode_lke_cluster._.kubeconfig)
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
value = linode_lke_cluster._.id
|
||||
}
|
||||
8
prepare-tf/modules/linode/providers.tf
Normal file
8
prepare-tf/modules/linode/providers.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
linode = {
|
||||
source = "linode/linode"
|
||||
version = "1.22.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
57
prepare-tf/modules/linode/variables.tf
Normal file
57
prepare-tf/modules/linode/variables.tf
Normal file
@@ -0,0 +1,57 @@
|
||||
variable "cluster_name" {
|
||||
type = string
|
||||
default = "deployed-with-terraform"
|
||||
}
|
||||
|
||||
variable "common_tags" {
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "node_size" {
|
||||
type = string
|
||||
default = "M"
|
||||
}
|
||||
|
||||
variable "min_nodes_per_pool" {
|
||||
type = number
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "max_nodes_per_pool" {
|
||||
type = number
|
||||
default = 5
|
||||
}
|
||||
|
||||
# FIXME
|
||||
variable "enable_arm_pool" {
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "node_types" {
|
||||
type = map(string)
|
||||
default = {
|
||||
"S" = "g6-standard-1"
|
||||
"M" = "g6-standard-2"
|
||||
"L" = "g6-standard-4"
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
node_type = var.node_types[var.node_size]
|
||||
}
|
||||
|
||||
# To view supported versions, run:
|
||||
# linode-cli regions list
|
||||
variable "region" {
|
||||
type = string
|
||||
default = "us-east"
|
||||
}
|
||||
|
||||
# To view supported versions, run:
|
||||
# linode-cli lke versions-list --json | jq -r .[].id
|
||||
variable "k8s_version" {
|
||||
type = string
|
||||
default = "1.21"
|
||||
}
|
||||
58
prepare-tf/modules/oraclecloud/main.tf
Normal file
58
prepare-tf/modules/oraclecloud/main.tf
Normal file
@@ -0,0 +1,58 @@
|
||||
resource "oci_identity_compartment" "_" {
|
||||
name = var.cluster_name
|
||||
description = var.cluster_name
|
||||
}
|
||||
|
||||
locals {
|
||||
compartment_id = oci_identity_compartment._.id
|
||||
}
|
||||
|
||||
data "oci_identity_availability_domains" "_" {
|
||||
compartment_id = local.compartment_id
|
||||
}
|
||||
|
||||
data "oci_core_images" "_" {
|
||||
for_each = local.pools
|
||||
compartment_id = local.compartment_id
|
||||
operating_system = "Oracle Linux"
|
||||
operating_system_version = "7.9"
|
||||
shape = each.value.shape
|
||||
}
|
||||
|
||||
resource "oci_containerengine_cluster" "_" {
|
||||
compartment_id = local.compartment_id
|
||||
kubernetes_version = var.k8s_version
|
||||
name = "tf-oke"
|
||||
vcn_id = oci_core_vcn._.id
|
||||
options {
|
||||
service_lb_subnet_ids = [oci_core_subnet.loadbalancers.id]
|
||||
}
|
||||
endpoint_config {
|
||||
is_public_ip_enabled = true
|
||||
subnet_id = oci_core_subnet.controlplane.id
|
||||
}
|
||||
}
|
||||
|
||||
resource "oci_containerengine_node_pool" "_" {
|
||||
for_each = local.pools
|
||||
cluster_id = oci_containerengine_cluster._.id
|
||||
compartment_id = local.compartment_id
|
||||
kubernetes_version = var.k8s_version
|
||||
name = each.key
|
||||
node_shape = each.value.shape
|
||||
node_shape_config {
|
||||
memory_in_gbs = local.node_type.memory_in_gbs
|
||||
ocpus = local.node_type.ocpus
|
||||
}
|
||||
node_config_details {
|
||||
size = var.min_nodes_per_pool
|
||||
placement_configs {
|
||||
availability_domain = data.oci_identity_availability_domains._.availability_domains[0].name
|
||||
subnet_id = oci_core_subnet.nodes.id
|
||||
}
|
||||
}
|
||||
node_source_details {
|
||||
image_id = data.oci_core_images._[each.key].images[0].id
|
||||
source_type = "image"
|
||||
}
|
||||
}
|
||||
81
prepare-tf/modules/oraclecloud/network.tf
Normal file
81
prepare-tf/modules/oraclecloud/network.tf
Normal file
@@ -0,0 +1,81 @@
|
||||
resource "oci_core_vcn" "_" {
|
||||
compartment_id = local.compartment_id
|
||||
cidr_block = "10.0.0.0/16"
|
||||
display_name = "tf-vcn"
|
||||
}
|
||||
|
||||
#
|
||||
# On OCI, you can have either "public" or "private" subnets.
|
||||
# In both cases, instances get addresses in the VCN CIDR block;
|
||||
# but instances in "public" subnets also get a public address.
|
||||
#
|
||||
# Then, to enable communication to the outside world, you need:
|
||||
# - for public subnets, an "internet gateway"
|
||||
# (will allow inbound and outbound traffic)
|
||||
# - for private subnets, a "NAT gateway"
|
||||
# (will only allow outbound traffic)
|
||||
# - optionally, for private subnets, a "service gateway"
|
||||
# (to access other OCI services, e.g. object store)
|
||||
#
|
||||
# In this configuration, we use public subnets, and since we
|
||||
# need outside access, we add an internet gateway.
|
||||
#
|
||||
# Note that the default routing table in a VCN is empty, so we
|
||||
# add the internet gateway to the default routing table.
|
||||
# Similarly, the default security group in a VCN blocks almost
|
||||
# everything, so we add a blanket rule in that security group.
|
||||
#
|
||||
|
||||
resource "oci_core_internet_gateway" "_" {
|
||||
compartment_id = local.compartment_id
|
||||
display_name = "tf-igw"
|
||||
vcn_id = oci_core_vcn._.id
|
||||
}
|
||||
|
||||
resource "oci_core_default_route_table" "_" {
|
||||
manage_default_resource_id = oci_core_vcn._.default_route_table_id
|
||||
route_rules {
|
||||
destination = "0.0.0.0/0"
|
||||
destination_type = "CIDR_BLOCK"
|
||||
network_entity_id = oci_core_internet_gateway._.id
|
||||
}
|
||||
}
|
||||
|
||||
resource "oci_core_default_security_list" "_" {
|
||||
manage_default_resource_id = oci_core_vcn._.default_security_list_id
|
||||
ingress_security_rules {
|
||||
protocol = "all"
|
||||
source = "0.0.0.0/0"
|
||||
}
|
||||
egress_security_rules {
|
||||
protocol = "all"
|
||||
destination = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "oci_core_subnet" "controlplane" {
|
||||
compartment_id = local.compartment_id
|
||||
cidr_block = "10.0.254.0/24"
|
||||
vcn_id = oci_core_vcn._.id
|
||||
display_name = "tf-controlplane"
|
||||
route_table_id = oci_core_default_route_table._.id
|
||||
security_list_ids = [oci_core_default_security_list._.id]
|
||||
}
|
||||
|
||||
resource "oci_core_subnet" "nodes" {
|
||||
compartment_id = local.compartment_id
|
||||
cidr_block = "10.0.0.0/20"
|
||||
vcn_id = oci_core_vcn._.id
|
||||
display_name = "tf-nodes"
|
||||
route_table_id = oci_core_default_route_table._.id
|
||||
security_list_ids = [oci_core_default_security_list._.id]
|
||||
}
|
||||
|
||||
resource "oci_core_subnet" "loadbalancers" {
|
||||
compartment_id = local.compartment_id
|
||||
cidr_block = "10.0.96.0/20"
|
||||
vcn_id = oci_core_vcn._.id
|
||||
display_name = "tf-loadbalancers"
|
||||
route_table_id = oci_core_default_route_table._.id
|
||||
security_list_ids = [oci_core_default_security_list._.id]
|
||||
}
|
||||
11
prepare-tf/modules/oraclecloud/outputs.tf
Normal file
11
prepare-tf/modules/oraclecloud/outputs.tf
Normal file
@@ -0,0 +1,11 @@
|
||||
data "oci_containerengine_cluster_kube_config" "_" {
|
||||
cluster_id = oci_containerengine_cluster._.id
|
||||
}
|
||||
|
||||
output "kubeconfig" {
|
||||
value = data.oci_containerengine_cluster_kube_config._.content
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
value = oci_containerengine_cluster._.id
|
||||
}
|
||||
8
prepare-tf/modules/oraclecloud/provider.tf
Normal file
8
prepare-tf/modules/oraclecloud/provider.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
oci = {
|
||||
source = "hashicorp/oci"
|
||||
version = "4.48.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
78
prepare-tf/modules/oraclecloud/variables.tf
Normal file
78
prepare-tf/modules/oraclecloud/variables.tf
Normal file
@@ -0,0 +1,78 @@
|
||||
variable "cluster_name" {
|
||||
type = string
|
||||
default = "deployed-with-terraform"
|
||||
}
|
||||
|
||||
variable "common_tags" {
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "node_size" {
|
||||
type = string
|
||||
default = "M"
|
||||
}
|
||||
|
||||
variable "min_nodes_per_pool" {
|
||||
type = number
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "max_nodes_per_pool" {
|
||||
type = number
|
||||
default = 5
|
||||
}
|
||||
|
||||
variable "enable_arm_pool" {
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
locals {
|
||||
arm_pool = {
|
||||
shape = "VM.Standard.A1.Flex"
|
||||
}
|
||||
x86_pool = {
|
||||
shape = "VM.Standard.E4.Flex"
|
||||
}
|
||||
pools = var.enable_arm_pool ? {
|
||||
"oke-arm" = local.arm_pool
|
||||
"oke-x86" = local.x86_pool
|
||||
} : {
|
||||
"oke-x86" = local.x86_pool
|
||||
}
|
||||
}
|
||||
|
||||
output "pool" {
|
||||
value = local.pools
|
||||
}
|
||||
|
||||
variable "node_types" {
|
||||
# FIXME put better typing here
|
||||
type = map(map(number))
|
||||
default = {
|
||||
"S" = {
|
||||
memory_in_gbs = 2
|
||||
ocpus = 1
|
||||
}
|
||||
"M" = {
|
||||
memory_in_gbs = 4
|
||||
ocpus = 1
|
||||
}
|
||||
"L" = {
|
||||
memory_in_gbs = 8
|
||||
ocpus = 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
node_type = var.node_types[var.node_size]
|
||||
}
|
||||
|
||||
# To view supported versions, run:
|
||||
# oci ce cluster-options get --cluster-option-id all | jq -r '.data["kubernetes-versions"][]'
|
||||
variable "k8s_version" {
|
||||
type = string
|
||||
default = "v1.20.11"
|
||||
}
|
||||
18
prepare-tf/modules/scaleway/main.tf
Normal file
18
prepare-tf/modules/scaleway/main.tf
Normal file
@@ -0,0 +1,18 @@
|
||||
resource "scaleway_k8s_cluster" "_" {
|
||||
name = var.cluster_name
|
||||
tags = var.common_tags
|
||||
version = var.k8s_version
|
||||
cni = var.cni
|
||||
}
|
||||
|
||||
resource "scaleway_k8s_pool" "_" {
|
||||
cluster_id = scaleway_k8s_cluster._.id
|
||||
name = "scw-x86"
|
||||
tags = var.common_tags
|
||||
node_type = local.node_type
|
||||
size = var.min_nodes_per_pool
|
||||
min_size = var.min_nodes_per_pool
|
||||
max_size = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
|
||||
autoscaling = true
|
||||
autohealing = true
|
||||
}
|
||||
7
prepare-tf/modules/scaleway/outputs.tf
Normal file
7
prepare-tf/modules/scaleway/outputs.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
output "kubeconfig" {
|
||||
value = scaleway_k8s_cluster._.kubeconfig.0.config_file
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
value = scaleway_k8s_cluster._.id
|
||||
}
|
||||
8
prepare-tf/modules/scaleway/providers.tf
Normal file
8
prepare-tf/modules/scaleway/providers.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
scaleway = {
|
||||
source = "scaleway/scaleway"
|
||||
version = "2.1.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
55
prepare-tf/modules/scaleway/variables.tf
Normal file
55
prepare-tf/modules/scaleway/variables.tf
Normal file
@@ -0,0 +1,55 @@
|
||||
variable "cluster_name" {
|
||||
type = string
|
||||
default = "deployed-with-terraform"
|
||||
}
|
||||
|
||||
variable "common_tags" {
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "node_size" {
|
||||
type = string
|
||||
default = "M"
|
||||
}
|
||||
|
||||
variable "min_nodes_per_pool" {
|
||||
type = number
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "max_nodes_per_pool" {
|
||||
type = number
|
||||
default = 5
|
||||
}
|
||||
|
||||
# FIXME
|
||||
variable "enable_arm_pool" {
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "node_types" {
|
||||
type = map(string)
|
||||
default = {
|
||||
"S" = "DEV1-S"
|
||||
"M" = "DEV1-M"
|
||||
"L" = "DEV1-L"
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
node_type = var.node_types[var.node_size]
|
||||
}
|
||||
|
||||
variable "cni" {
|
||||
type = string
|
||||
default = "cilium"
|
||||
}
|
||||
|
||||
# See supported versions with:
|
||||
# scw k8s version list -o json | jq -r .[].name
|
||||
variable "k8s_version" {
|
||||
type = string
|
||||
default = "1.22.2"
|
||||
}
|
||||
3
prepare-tf/providers.tf
Normal file
3
prepare-tf/providers.tf
Normal file
@@ -0,0 +1,3 @@
|
||||
terraform {
|
||||
required_version = ">= 1.0"
|
||||
}
|
||||
225
prepare-tf/stage2.tmpl
Normal file
225
prepare-tf/stage2.tmpl
Normal file
@@ -0,0 +1,225 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = "2.0.3"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
%{ for index, cluster in clusters ~}
|
||||
|
||||
provider "kubernetes" {
|
||||
alias = "cluster_${index}"
|
||||
config_path = "./kubeconfig.${index}"
|
||||
}
|
||||
|
||||
resource "kubernetes_namespace" "shpod_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
metadata {
|
||||
name = "shpod"
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_deployment" "shpod_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
metadata {
|
||||
name = "shpod"
|
||||
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
|
||||
}
|
||||
spec {
|
||||
selector {
|
||||
match_labels = {
|
||||
app = "shpod"
|
||||
}
|
||||
}
|
||||
template {
|
||||
metadata {
|
||||
labels = {
|
||||
app = "shpod"
|
||||
}
|
||||
}
|
||||
spec {
|
||||
service_account_name = "shpod"
|
||||
container {
|
||||
image = "jpetazzo/shpod"
|
||||
name = "shpod"
|
||||
env {
|
||||
name = "PASSWORD"
|
||||
value = random_string.shpod_${index}.result
|
||||
}
|
||||
lifecycle {
|
||||
post_start {
|
||||
exec {
|
||||
command = [ "sh", "-c", "curl http://myip.enix.org/REMOTE_ADDR > /etc/HOSTIP || true" ]
|
||||
}
|
||||
}
|
||||
}
|
||||
resources {
|
||||
limits = {
|
||||
cpu = "2"
|
||||
memory = "500M"
|
||||
}
|
||||
requests = {
|
||||
cpu = "100m"
|
||||
memory = "250M"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_service" "shpod_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
lifecycle {
|
||||
# Folks might alter their shpod Service to expose extra ports.
|
||||
# Don't reset their changes.
|
||||
ignore_changes = [ spec ]
|
||||
}
|
||||
metadata {
|
||||
name = "shpod"
|
||||
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
|
||||
}
|
||||
spec {
|
||||
selector = {
|
||||
app = "shpod"
|
||||
}
|
||||
port {
|
||||
name = "ssh"
|
||||
port = 22
|
||||
target_port = 22
|
||||
node_port = 32222
|
||||
}
|
||||
type = "NodePort"
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_service_account" "shpod_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
metadata {
|
||||
name = "shpod"
|
||||
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_cluster_role_binding" "shpod_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
metadata {
|
||||
name = "shpod"
|
||||
}
|
||||
role_ref {
|
||||
api_group = "rbac.authorization.k8s.io"
|
||||
kind = "ClusterRole"
|
||||
name = "cluster-admin"
|
||||
}
|
||||
subject {
|
||||
kind = "ServiceAccount"
|
||||
name = "shpod"
|
||||
namespace = "shpod"
|
||||
}
|
||||
}
|
||||
|
||||
resource "random_string" "shpod_${index}" {
|
||||
length = 6
|
||||
special = false
|
||||
upper = false
|
||||
}
|
||||
|
||||
provider "helm" {
|
||||
alias = "cluster_${index}"
|
||||
kubernetes {
|
||||
config_path = "./kubeconfig.${index}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "helm_release" "metrics_server_${index}" {
|
||||
provider = helm.cluster_${index}
|
||||
repository = "https://charts.bitnami.com/bitnami"
|
||||
chart = "metrics-server"
|
||||
name = "metrics-server"
|
||||
namespace = "metrics-server"
|
||||
create_namespace = true
|
||||
set {
|
||||
name = "apiService.create"
|
||||
value = "true"
|
||||
}
|
||||
set {
|
||||
name = "extraArgs.kubelet-insecure-tls"
|
||||
value = "true"
|
||||
}
|
||||
set {
|
||||
name = "extraArgs.kubelet-preferred-address-types"
|
||||
value = "InternalIP"
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_config_map" "kubeconfig_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
metadata {
|
||||
name = "kubeconfig"
|
||||
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
|
||||
}
|
||||
data = {
|
||||
kubeconfig_from_provider = file("./kubeconfig.${index}")
|
||||
kubeconfig_cluster_admin = <<-EOT
|
||||
kind: Config
|
||||
apiVersion: v1
|
||||
current-context: cluster-admin@k8s-${index}
|
||||
clusters:
|
||||
- name: k8s-${index}
|
||||
cluster:
|
||||
certificate-authority-data: $${yamldecode(file("./kubeconfig.${index}")).clusters.0.cluster.certificate-authority-data}
|
||||
server: $${yamldecode(file("./kubeconfig.${index}")).clusters.0.cluster.server}
|
||||
contexts:
|
||||
- name: cluster-admin@k8s-${index}
|
||||
context:
|
||||
cluster: k8s-${index}
|
||||
user: cluster-admin
|
||||
users:
|
||||
- name: cluster-admin
|
||||
user:
|
||||
client-key-data: $${base64encode(tls_private_key.cluster_admin_${index}.private_key_pem)}
|
||||
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request.cluster_admin_${index}.certificate)}
|
||||
EOT
|
||||
}
|
||||
}
|
||||
|
||||
resource "tls_private_key" "cluster_admin_${index}" {
|
||||
algorithm = "RSA"
|
||||
}
|
||||
|
||||
resource "tls_cert_request" "cluster_admin_${index}" {
|
||||
key_algorithm = tls_private_key.cluster_admin_${index}.algorithm
|
||||
private_key_pem = tls_private_key.cluster_admin_${index}.private_key_pem
|
||||
subject {
|
||||
common_name = "cluster-admin"
|
||||
organization = "system:masters"
|
||||
}
|
||||
}
|
||||
|
||||
resource "kubernetes_certificate_signing_request" "cluster_admin_${index}" {
|
||||
provider = kubernetes.cluster_${index}
|
||||
metadata {
|
||||
name = "cluster-admin"
|
||||
}
|
||||
spec {
|
||||
usages = ["client auth"]
|
||||
request = tls_cert_request.cluster_admin_${index}.cert_request_pem
|
||||
}
|
||||
auto_approve = true
|
||||
}
|
||||
|
||||
%{ endfor ~}
|
||||
|
||||
output "ip_addresses_of_nodes" {
|
||||
value = join("\n", [
|
||||
%{ for index, cluster in clusters ~}
|
||||
join("\t", concat(
|
||||
[ random_string.shpod_${index}.result, "ssh -l k8s -p 32222" ],
|
||||
split(" ", file("./externalips.${index}"))
|
||||
)),
|
||||
%{ endfor ~}
|
||||
])
|
||||
}
|
||||
28
prepare-tf/variables.tf
Normal file
28
prepare-tf/variables.tf
Normal file
@@ -0,0 +1,28 @@
|
||||
variable "how_many_clusters" {
|
||||
type = number
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "node_size" {
|
||||
type = string
|
||||
default = "M"
|
||||
# Can be S, M, L.
|
||||
# S = 2 GB RAM
|
||||
# M = 4 GB RAM
|
||||
# L = 8 GB RAM
|
||||
}
|
||||
|
||||
variable "min_nodes_per_pool" {
|
||||
type = number
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "max_nodes_per_pool" {
|
||||
type = number
|
||||
default = 0
|
||||
}
|
||||
|
||||
variable "enable_arm_pool" {
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
@@ -8,3 +8,4 @@ export TF_VAR_domain="Default"
|
||||
export TF_VAR_password="..."
|
||||
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
|
||||
export TF_VAR_flavor="GP1.S"
|
||||
export TF_VAR_image="Ubuntu 18.04"
|
||||
|
||||
@@ -88,3 +88,8 @@ need_settings() {
|
||||
die "Settings file $1 doesn't exist."
|
||||
fi
|
||||
}
|
||||
|
||||
need_login_password() {
|
||||
USER_LOGIN=$(yq -r .user_login < tags/$TAG/settings.yaml)
|
||||
USER_PASSWORD=$(yq -r .user_password < tags/$TAG/settings.yaml)
|
||||
}
|
||||
78
prepare-vms/lib/clusterize.py
Normal file
78
prepare-vms/lib/clusterize.py
Normal file
@@ -0,0 +1,78 @@
|
||||
#!/usr/bin/env python
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import yaml
|
||||
|
||||
#################################
|
||||
|
||||
config = yaml.load(open("/tmp/settings.yaml"))
|
||||
CLUSTER_SIZE = config["clustersize"]
|
||||
CLUSTER_PREFIX = config["clusterprefix"]
|
||||
|
||||
#################################
|
||||
|
||||
# This script will be run as ubuntu user, which has root privileges.
|
||||
|
||||
STEP = 0
|
||||
|
||||
def bold(msg):
|
||||
return "{} {} {}".format("$(tput smso)", msg, "$(tput rmso)")
|
||||
|
||||
def system(cmd):
|
||||
global STEP
|
||||
with open("/tmp/pp.status", "a") as f:
|
||||
t1 = time.time()
|
||||
f.write(bold("--- RUNNING [step {}] ---> {}...".format(STEP, cmd)))
|
||||
retcode = os.system(cmd)
|
||||
t2 = time.time()
|
||||
td = str(t2-t1)[:5]
|
||||
f.write(bold("[{}] in {}s\n".format(retcode, td)))
|
||||
STEP += 1
|
||||
with open(os.environ["HOME"] + "/.bash_history", "a") as f:
|
||||
f.write("{}\n".format(cmd))
|
||||
if retcode != 0:
|
||||
msg = "The following command failed with exit code {}:\n".format(retcode)
|
||||
msg+= cmd
|
||||
raise(Exception(msg))
|
||||
|
||||
# Get our public IP address
|
||||
# ipv4_retrieval_endpoint = "http://169.254.169.254/latest/meta-data/public-ipv4"
|
||||
ipv4_retrieval_endpoint = "http://myip.enix.org/REMOTE_ADDR"
|
||||
system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))
|
||||
ipv4 = open("/tmp/ipv4").read()
|
||||
system("echo HOSTIP={} | sudo tee -a /etc/environment".format(ipv4))
|
||||
|
||||
### BEGIN CLUSTERING ###
|
||||
|
||||
addresses = list(l.strip() for l in sys.stdin)
|
||||
|
||||
assert ipv4 in addresses
|
||||
|
||||
def makenames(addrs):
|
||||
return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]
|
||||
|
||||
while addresses:
|
||||
cluster = addresses[:CLUSTER_SIZE]
|
||||
addresses = addresses[CLUSTER_SIZE:]
|
||||
if ipv4 not in cluster:
|
||||
continue
|
||||
names = makenames(cluster)
|
||||
for ipaddr, name in zip(cluster, names):
|
||||
system("grep ^{} /etc/hosts || echo {} {} | sudo tee -a /etc/hosts"
|
||||
.format(ipaddr, ipaddr, name))
|
||||
print(cluster)
|
||||
|
||||
mynode = cluster.index(ipv4) + 1
|
||||
system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
|
||||
system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
|
||||
|
||||
# Record the IPV4 and name of the first node
|
||||
system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
|
||||
system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))
|
||||
|
||||
# Create a convenience file to easily check if we're the first node
|
||||
if ipv4 == cluster[0]:
|
||||
system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
|
||||
else:
|
||||
system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
|
||||
@@ -57,30 +57,103 @@ _cmd_clean() {
|
||||
done
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
_cmd_deploy() {
|
||||
_cmd createuser "Create the user that students will use"
|
||||
_cmd_createuser() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
need_login_password
|
||||
|
||||
# wait until all hosts are reachable before trying to deploy
|
||||
info "Trying to reach $TAG instances..."
|
||||
while ! tag_is_reachable; do
|
||||
>/dev/stderr echo -n "."
|
||||
sleep 2
|
||||
done
|
||||
>/dev/stderr echo ""
|
||||
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
|
||||
# If this VM image is using cloud-init,
|
||||
# wait for cloud-init to be done
|
||||
pssh "
|
||||
if [ -d /var/lib/cloud ]; then
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi"
|
||||
set -e
|
||||
# Create the user if it doesn't exist yet.
|
||||
id $USER_LOGIN || sudo useradd -d /home/$USER_LOGIN -g users -m -s /bin/bash $USER_LOGIN
|
||||
# Add them to the docker group, if there is one.
|
||||
grep ^docker: /etc/group && sudo usermod -aG docker $USER_LOGIN
|
||||
# Set their password.
|
||||
echo $USER_LOGIN:$USER_PASSWORD | sudo chpasswd
|
||||
# Add them to sudoers and allow passwordless authentication.
|
||||
echo '$USER_LOGIN ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/$USER_LOGIN
|
||||
"
|
||||
|
||||
pssh "
|
||||
set -e
|
||||
sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
|
||||
sudo service ssh restart
|
||||
"
|
||||
|
||||
pssh "
|
||||
set -e
|
||||
cd /home/$USER_LOGIN
|
||||
sudo -u $USER_LOGIN mkdir -p .ssh
|
||||
if i_am_first_node; then
|
||||
# Generate a key pair with an empty passphrase.
|
||||
if ! sudo -u $USER_LOGIN [ -f .ssh/id_rsa ]; then
|
||||
sudo -u $USER_LOGIN ssh-keygen -t rsa -f .ssh/id_rsa -P ''
|
||||
sudo -u $USER_LOGIN cp .ssh/id_rsa.pub .ssh/authorized_keys
|
||||
fi
|
||||
fi
|
||||
"
|
||||
|
||||
pssh "
|
||||
set -e
|
||||
cd /home/$USER_LOGIN
|
||||
if ! i_am_first_node; then
|
||||
# Copy keys from the first node.
|
||||
ssh $SSHOPTS \$(cat /etc/name_of_first_node) sudo -u $USER_LOGIN tar -C /home/$USER_LOGIN -cvf- .ssh |
|
||||
sudo -u $USER_LOGIN tar -xf-
|
||||
fi
|
||||
"
|
||||
|
||||
# FIXME do this only once.
|
||||
pssh -I "sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc" <<"SQRL"
|
||||
|
||||
# Fancy prompt courtesy of @soulshake.
|
||||
export PS1='\e[1m\e[31m[$HOSTIP] \e[32m($(docker-prompt)) \e[34m\u@\h\e[35m \w\e[0m\n$ '
|
||||
|
||||
# Bigger history, in a different file, and saved before executing each command.
|
||||
export HISTSIZE=9999
|
||||
export HISTFILESIZE=9999
|
||||
shopt -s histappend
|
||||
trap 'history -a' DEBUG
|
||||
export HISTFILE=~/.history
|
||||
SQRL
|
||||
|
||||
pssh -I "sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.vimrc" <<SQRL
|
||||
syntax on
|
||||
set autoindent
|
||||
set expandtab
|
||||
set number
|
||||
set shiftwidth=2
|
||||
set softtabstop=2
|
||||
set nowrap
|
||||
SQRL
|
||||
|
||||
pssh -I "sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.tmux.conf" <<SQRL
|
||||
bind h select-pane -L
|
||||
bind j select-pane -D
|
||||
bind k select-pane -U
|
||||
bind l select-pane -R
|
||||
|
||||
# Allow using mouse to switch panes
|
||||
set -g mouse on
|
||||
|
||||
# Make scrolling with wheels work
|
||||
|
||||
bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
|
||||
bind -n WheelDownPane select-pane -t= \; send-keys -M
|
||||
SQRL
|
||||
|
||||
# Install docker-prompt script
|
||||
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
|
||||
pssh sudo chmod +x /usr/local/bin/docker-prompt
|
||||
|
||||
echo user_ok > tags/$TAG/status
|
||||
}
|
||||
|
||||
_cmd clusterize "Group VMs in clusters"
|
||||
_cmd_clusterize() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# Special case for scaleway since it doesn't come with sudo
|
||||
if [ "$INFRACLASS" = "scaleway" ]; then
|
||||
@@ -116,40 +189,19 @@ _cmd_deploy() {
|
||||
#fi"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
|
||||
# Install docker-prompt script
|
||||
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
|
||||
pssh sudo chmod +x /usr/local/bin/docker-prompt
|
||||
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
|
||||
pssh "
|
||||
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
|
||||
ssh $SSHOPTS \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
sudo -u docker tar -C /home/docker -xf-"
|
||||
|
||||
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
|
||||
pssh "
|
||||
grep docker@ /home/docker/.ssh/authorized_keys ||
|
||||
cat /home/docker/.ssh/id_rsa.pub |
|
||||
sudo -u docker tee -a /home/docker/.ssh/authorized_keys"
|
||||
pssh -I tee /tmp/clusterize.py <lib/clusterize.py
|
||||
pssh --timeout 900 --send-input "python /tmp/clusterize.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
|
||||
# On the first node, create and deploy TLS certs using Docker Machine
|
||||
# (Currently disabled.)
|
||||
true || pssh "
|
||||
if i_am_first_node; then
|
||||
grep '[0-9]\$' /etc/hosts |
|
||||
xargs -n2 sudo -H -u docker \
|
||||
docker-machine create -d generic --generic-ssh-user docker --generic-ip-address
|
||||
xargs -n2 sudo -H -u $USER_LOGIN \
|
||||
docker-machine create -d generic --generic-ssh-user $USER_LOGIN --generic-ip-address
|
||||
fi"
|
||||
|
||||
sep "Deployed tag $TAG"
|
||||
echo deployed > tags/$TAG/status
|
||||
info "You may want to run one of the following commands:"
|
||||
info "$0 kube $TAG"
|
||||
info "$0 pull_images $TAG"
|
||||
info "$0 cards $TAG"
|
||||
echo cluster_ok > tags/$TAG/status
|
||||
}
|
||||
|
||||
_cmd disabledocker "Stop Docker Engine and don't restart it automatically"
|
||||
@@ -158,10 +210,57 @@ _cmd_disabledocker() {
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo systemctl disable docker.service
|
||||
sudo systemctl disable docker.socket
|
||||
sudo systemctl stop docker
|
||||
sudo killall containerd
|
||||
sudo systemctl disable docker.socket --now
|
||||
sudo systemctl disable docker.service --now
|
||||
sudo systemctl disable containerd.service --now
|
||||
"
|
||||
}
|
||||
|
||||
_cmd docker "Install and start Docker"
|
||||
_cmd_docker() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
set -e
|
||||
# On EC2, the ephemeral disk might be mounted on /mnt.
|
||||
# If /mnt is a mountpoint, place Docker workspace on it.
|
||||
if mountpoint -q /mnt; then
|
||||
sudo mkdir -p /mnt/docker
|
||||
sudo ln -sfn /mnt/docker /var/lib/docker
|
||||
fi
|
||||
|
||||
# This will install the latest Docker.
|
||||
sudo apt-get -qy install apt-transport-https ca-certificates curl software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
sudo add-apt-repository 'deb https://download.docker.com/linux/ubuntu bionic stable'
|
||||
sudo apt-get -q update
|
||||
sudo apt-get -qy install docker-ce
|
||||
"
|
||||
|
||||
##VERSION## https://github.com/docker/compose/releases
|
||||
if [ "$ARCHITECTURE" ]; then
|
||||
COMPOSE_VERSION=v2.0.1
|
||||
COMPOSE_PLATFORM='linux-$(uname -m)'
|
||||
else
|
||||
COMPOSE_VERSION=1.29.2
|
||||
COMPOSE_PLATFORM='Linux-$(uname -m)'
|
||||
fi
|
||||
pssh "
|
||||
set -e
|
||||
### Install docker-compose.
|
||||
sudo curl -fsSL -o /usr/local/bin/docker-compose \
|
||||
https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-$COMPOSE_PLATFORM
|
||||
sudo chmod +x /usr/local/bin/docker-compose
|
||||
docker-compose version
|
||||
|
||||
### Install docker-machine.
|
||||
##VERSION## https://github.com/docker/machine/releases
|
||||
MACHINE_VERSION=v0.16.2
|
||||
sudo curl -fsSL -o /usr/local/bin/docker-machine \
|
||||
https://github.com/docker/machine/releases/download/\$MACHINE_VERSION/docker-machine-\$(uname -s)-\$(uname -m)
|
||||
sudo chmod +x /usr/local/bin/docker-machine
|
||||
docker-machine version
|
||||
"
|
||||
}
|
||||
|
||||
@@ -174,23 +273,24 @@ _cmd_kubebins() {
|
||||
ETCD_VERSION=v3.4.13
|
||||
K8SBIN_VERSION=v1.19.11 # Can't go to 1.20 because it requires a serviceaccount signing key.
|
||||
CNI_VERSION=v0.8.7
|
||||
ARCH=${ARCHITECTURE-amd64}
|
||||
pssh --timeout 300 "
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-amd64.tar.gz \
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$ARCH.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-amd64.tar.gz \
|
||||
curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-$ARCH.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-amd64-$CNI_VERSION.tgz \
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$ARCH-$CNI_VERSION.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
@@ -200,12 +300,13 @@ _cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
|
||||
_cmd_kube() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
need_login_password
|
||||
|
||||
# Optional version, e.g. 1.13.5
|
||||
KUBEVERSION=$2
|
||||
if [ "$KUBEVERSION" ]; then
|
||||
EXTRA_APTGET="=$KUBEVERSION-00"
|
||||
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
|
||||
EXTRA_KUBEADM="kubernetesVersion: v$KUBEVERSION"
|
||||
else
|
||||
EXTRA_APTGET=""
|
||||
EXTRA_KUBEADM=""
|
||||
@@ -235,17 +336,34 @@ _cmd_kube() {
|
||||
pssh --timeout 200 "
|
||||
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token &&
|
||||
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4) --ignore-preflight-errors=NumCPU
|
||||
cat >/tmp/kubeadm-config.yaml <<EOF
|
||||
kind: InitConfiguration
|
||||
apiVersion: kubeadm.k8s.io/v1beta2
|
||||
bootstrapTokens:
|
||||
- token: \$(cat /tmp/token)
|
||||
---
|
||||
kind: KubeletConfiguration
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
cgroupDriver: cgroupfs
|
||||
---
|
||||
kind: ClusterConfiguration
|
||||
apiVersion: kubeadm.k8s.io/v1beta2
|
||||
apiServer:
|
||||
certSANs:
|
||||
- \$(cat /tmp/ipv4)
|
||||
$EXTRA_KUBEADM
|
||||
EOF
|
||||
sudo kubeadm init --config=/tmp/kubeadm-config.yaml --ignore-preflight-errors=NumCPU
|
||||
fi"
|
||||
|
||||
# Put kubeconfig in ubuntu's and docker's accounts
|
||||
# Put kubeconfig in ubuntu's and $USER_LOGIN's accounts
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
sudo mkdir -p \$HOME/.kube /home/docker/.kube &&
|
||||
sudo mkdir -p \$HOME/.kube /home/$USER_LOGIN/.kube &&
|
||||
sudo cp /etc/kubernetes/admin.conf \$HOME/.kube/config &&
|
||||
sudo cp /etc/kubernetes/admin.conf /home/docker/.kube/config &&
|
||||
sudo cp /etc/kubernetes/admin.conf /home/$USER_LOGIN/.kube/config &&
|
||||
sudo chown -R \$(id -u) \$HOME/.kube &&
|
||||
sudo chown -R docker /home/docker/.kube
|
||||
sudo chown -R $USER_LOGIN /home/$USER_LOGIN/.kube
|
||||
fi"
|
||||
|
||||
# Install weave as the pod network
|
||||
@@ -274,31 +392,65 @@ _cmd kubetools "Install a bunch of CLI tools for Kubernetes"
|
||||
_cmd_kubetools() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
need_login_password
|
||||
|
||||
ARCH=${ARCHITECTURE-amd64}
|
||||
|
||||
# Folks, please, be consistent!
|
||||
# Either pick "uname -m" (on Linux, that's x86_64, aarch64, etc.)
|
||||
# Or GOARCH (amd64, arm64, etc.)
|
||||
# But don't mix both! Thank you ♥
|
||||
case $ARCH in
|
||||
amd64)
|
||||
HERP_DERP_ARCH=x86_64
|
||||
TILT_ARCH=x86_64
|
||||
;;
|
||||
*)
|
||||
HERP_DERP_ARCH=$ARCH
|
||||
TILT_ARCH=${ARCH}_ALPHA
|
||||
;;
|
||||
esac
|
||||
|
||||
# Install kubectx and kubens
|
||||
pssh "
|
||||
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
|
||||
sudo ln -sf \$HOME/kubectx/kubectx /usr/local/bin/kctx &&
|
||||
sudo ln -sf \$HOME/kubectx/kubens /usr/local/bin/kns &&
|
||||
sudo cp \$HOME/kubectx/completion/*.bash /etc/bash_completion.d &&
|
||||
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
|
||||
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
|
||||
sudo -u docker tee -a /home/docker/.bashrc <<EOF
|
||||
. \$HOME/kube-ps1/kube-ps1.sh
|
||||
set -e
|
||||
if ! [ -x /usr/local/bin/kctx ]; then
|
||||
cd /tmp
|
||||
git clone https://github.com/ahmetb/kubectx
|
||||
sudo cp kubectx/kubectx /usr/local/bin/kctx
|
||||
sudo cp kubectx/kubens /usr/local/bin/kns
|
||||
sudo cp kubectx/completion/*.bash /etc/bash_completion.d
|
||||
fi"
|
||||
|
||||
# Install kube-ps1
|
||||
pssh "
|
||||
set -e
|
||||
if ! [ -f /etc/profile.d/kube-ps1.sh ]; then
|
||||
cd /tmp
|
||||
git clone https://github.com/jonmosco/kube-ps1
|
||||
sudo cp kube-ps1/kube-ps1.sh /etc/profile.d/kube-ps1.sh
|
||||
sudo -u $USER_LOGIN sed -i s/docker-prompt/kube_ps1/ /home/$USER_LOGIN/.bashrc &&
|
||||
sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc <<EOF
|
||||
KUBE_PS1_PREFIX=""
|
||||
KUBE_PS1_SUFFIX=""
|
||||
KUBE_PS1_SYMBOL_ENABLE="false"
|
||||
KUBE_PS1_CTX_COLOR="green"
|
||||
KUBE_PS1_NS_COLOR="green"
|
||||
EOF"
|
||||
EOF
|
||||
fi"
|
||||
|
||||
# Install stern
|
||||
##VERSION## https://github.com/stern/stern/releases
|
||||
STERN_VERSION=1.20.1
|
||||
FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
|
||||
URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/stern ]; then
|
||||
##VERSION##
|
||||
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
|
||||
sudo chmod +x /usr/local/bin/stern &&
|
||||
curl -fsSL $URL |
|
||||
sudo tar -C /usr/local/bin -zx --strip-components=1 $FILENAME/stern
|
||||
sudo chmod +x /usr/local/bin/stern
|
||||
stern --completion bash | sudo tee /etc/bash_completion.d/stern
|
||||
stern --version
|
||||
fi"
|
||||
|
||||
# Install helm
|
||||
@@ -306,24 +458,30 @@ EOF"
|
||||
if [ ! -x /usr/local/bin/helm ]; then
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
|
||||
helm completion bash | sudo tee /etc/bash_completion.d/helm
|
||||
helm version
|
||||
fi"
|
||||
|
||||
# Install kustomize
|
||||
##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
|
||||
KUSTOMIZE_VERSION=v4.4.0
|
||||
URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kustomize ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.6.1/kustomize_v3.6.1_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx kustomize
|
||||
curl -fsSL $URL |
|
||||
sudo tar -C /usr/local/bin -zx kustomize
|
||||
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
|
||||
kustomize version
|
||||
fi"
|
||||
|
||||
# Install ship
|
||||
# Note: 0.51.3 is the last version that doesn't display GIN-debug messages
|
||||
# (don't want to get folks confused by that!)
|
||||
# Only install ship on Intel platforms (no ARM 64 builds).
|
||||
[ "$ARCH" = "amd64" ] &&
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ship ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_amd64.tar.gz |
|
||||
curl -fsSL https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_$ARCH.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
|
||||
@@ -331,61 +489,81 @@ EOF"
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo curl -fsSLo /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/$ARCH/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
aws-iam-authenticator version
|
||||
fi"
|
||||
|
||||
# Install the krew package manager
|
||||
pssh "
|
||||
if [ ! -d /home/docker/.krew ]; then
|
||||
if [ ! -d /home/$USER_LOGIN/.krew ]; then
|
||||
cd /tmp &&
|
||||
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz |
|
||||
KREW=krew-linux_$ARCH
|
||||
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/\$KREW.tar.gz |
|
||||
tar -zxf- &&
|
||||
sudo -u docker -H ./krew-linux_amd64 install krew &&
|
||||
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
|
||||
sudo -u $USER_LOGIN -H ./\$KREW install krew &&
|
||||
echo export PATH=/home/$USER_LOGIN/.krew/bin:\\\$PATH | sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc
|
||||
fi"
|
||||
|
||||
# Install k9s
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/k9s ]; then
|
||||
VERSION=v0.24.10 &&
|
||||
FILENAME=k9s_\${VERSION}_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/download/\$VERSION/\$FILENAME |
|
||||
FILENAME=k9s_Linux_$HERP_DERP_ARCH.tar.gz &&
|
||||
curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin k9s
|
||||
k9s version
|
||||
fi"
|
||||
|
||||
# Install popeye
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/popeye ]; then
|
||||
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
FILENAME=popeye_Linux_$HERP_DERP_ARCH.tar.gz &&
|
||||
curl -fsSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin popeye
|
||||
popeye version
|
||||
fi"
|
||||
|
||||
# Install Tilt
|
||||
# Official instructions:
|
||||
# curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
|
||||
# But the install script is not arch-aware (see https://github.com/tilt-dev/tilt/pull/5050).
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/tilt ]; then
|
||||
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
|
||||
TILT_VERSION=0.22.15
|
||||
FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
|
||||
curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin tilt
|
||||
tilt version
|
||||
fi"
|
||||
|
||||
# Install Skaffold
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/skaffold ]; then
|
||||
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 &&
|
||||
curl -fsSLo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-$ARCH &&
|
||||
sudo install skaffold /usr/local/bin/
|
||||
skaffold version
|
||||
fi"
|
||||
|
||||
# Install Kompose
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kompose ]; then
|
||||
curl -Lo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-amd64 &&
|
||||
curl -fsSLo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-$ARCH &&
|
||||
sudo install kompose /usr/local/bin
|
||||
kompose version
|
||||
fi"
|
||||
|
||||
pssh "
|
||||
##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
|
||||
KUBESEAL_VERSION=v0.16.0
|
||||
case $ARCH in
|
||||
amd64) FILENAME=kubeseal-linux-amd64;;
|
||||
arm64) FILENAME=kubeseal-arm64;;
|
||||
*) FILENAME=nope;;
|
||||
esac
|
||||
[ "$FILENAME" = "nope" ] || pssh "
|
||||
if [ ! -x /usr/local/bin/kubeseal ]; then
|
||||
curl -Lo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.13.1/kubeseal-linux-amd64 &&
|
||||
curl -fsSLo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/$KUBESEAL_VERSION/$FILENAME &&
|
||||
sudo install kubeseal /usr/local/bin
|
||||
kubeseal --version
|
||||
fi"
|
||||
}
|
||||
|
||||
@@ -412,6 +590,7 @@ _cmd_kubetest() {
|
||||
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
|
||||
done
|
||||
fi"
|
||||
echo kube_ok > tags/$TAG/status
|
||||
}
|
||||
|
||||
_cmd ips "Show the IP addresses for a given tag"
|
||||
@@ -457,14 +636,6 @@ _cmd_maketag() {
|
||||
date +%Y-%m-%d-%H-%M-$MS-$USER
|
||||
}
|
||||
|
||||
_cmd ping "Ping VMs in a given tag, to check that they have network access"
|
||||
_cmd_ping() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
fping < tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
_cmd netfix "Disable GRO and run a pinger job on the VMs"
|
||||
_cmd_netfix () {
|
||||
TAG=$1
|
||||
@@ -490,14 +661,28 @@ EOF
|
||||
sudo systemctl start pinger"
|
||||
}
|
||||
|
||||
_cmd ping "Ping VMs in a given tag, to check that they have network access"
|
||||
_cmd_ping() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
fping < tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
_cmd tailhist "Install history viewer on port 1088"
|
||||
_cmd_tailhist () {
|
||||
TAG=$1
|
||||
need_tag
|
||||
need_login_password
|
||||
|
||||
pssh "
|
||||
wget https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0_amd64.deb
|
||||
sudo dpkg -i websocketd-0.3.0_amd64.deb
|
||||
ARCH=${ARCHITECTURE-amd64}
|
||||
[ "$ARCH" = "aarch64" ] && ARCH=arm64
|
||||
|
||||
pssh -i "
|
||||
set -e
|
||||
wget https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
|
||||
unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
|
||||
sudo mv websocketd /usr/local/bin/websocketd
|
||||
sudo mkdir -p /tmp/tailhist
|
||||
sudo tee /root/tailhist.service <<EOF
|
||||
[Unit]
|
||||
@@ -508,16 +693,32 @@ WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/tmp/tailhist
|
||||
ExecStart=/usr/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/docker/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
|
||||
ExecStart=/usr/local/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/$USER_LOGIN/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
Restart=always
|
||||
EOF
|
||||
sudo systemctl enable /root/tailhist.service
|
||||
sudo systemctl start tailhist"
|
||||
sudo systemctl enable /root/tailhist.service --now
|
||||
"
|
||||
|
||||
pssh -I sudo tee /tmp/tailhist/index.html <lib/tailhist.html
|
||||
}
|
||||
|
||||
_cmd tools "Install a bunch of useful tools (editors, git, jq...)"
|
||||
_cmd_tools() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo apt-get -q update
|
||||
sudo apt-get -qy install apache2-utils emacs-nox git httping htop jid joe jq mosh python-setuptools tree unzip
|
||||
# This is for VMs with broken PRNG (symptom: running docker-compose randomly hangs)
|
||||
sudo apt-get -qy install haveged
|
||||
# I don't remember why we need to remove this
|
||||
sudo apt-get remove -y --purge dnsmasq-base
|
||||
"
|
||||
}
|
||||
|
||||
_cmd opensg "Open the default security group to ALL ingress traffic"
|
||||
_cmd_opensg() {
|
||||
need_infra $1
|
||||
@@ -583,9 +784,10 @@ _cmd ssh "Open an SSH session to the first node of a tag"
|
||||
_cmd_ssh() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
need_login_password
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Logging into $IP"
|
||||
ssh $SSHOPTS docker@$IP
|
||||
info "Logging into $IP (default password: $USER_PASSWORD)"
|
||||
ssh $SSHOPTS $USER_LOGIN@$IP
|
||||
|
||||
}
|
||||
|
||||
@@ -634,7 +836,7 @@ _cmd_start() {
|
||||
infra_start $COUNT
|
||||
sep
|
||||
info "Successfully created $COUNT instances with tag $TAG"
|
||||
echo created > tags/$TAG/status
|
||||
echo create_ok > tags/$TAG/status
|
||||
|
||||
# If the settings.yaml file has a "steps" field,
|
||||
# automatically execute all the actions listed in that field.
|
||||
@@ -648,8 +850,7 @@ _cmd_start() {
|
||||
if [ -z "$step" ]; then
|
||||
break
|
||||
fi
|
||||
sep
|
||||
info "Automatically executing step '$step'."
|
||||
sep "$TAG -> $step"
|
||||
TRY=1
|
||||
MAXTRY=10
|
||||
while ! $0 $step $TAG ; do
|
||||
@@ -661,7 +862,7 @@ _cmd_start() {
|
||||
die "Giving up."
|
||||
else
|
||||
sep
|
||||
info "Step '$step' failed. Let's wait 10 seconds and try again."
|
||||
info "Step '$step' failed for '$TAG'. Let's wait 10 seconds and try again."
|
||||
info "(Attempt $TRY out of $MAXTRY.)"
|
||||
sleep 10
|
||||
fi
|
||||
@@ -728,15 +929,15 @@ _cmd_tmux() {
|
||||
ssh $SSHOPTS -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
|
||||
}
|
||||
|
||||
_cmd helmprom "Install Helm and Prometheus"
|
||||
_cmd helmprom "Install Prometheus with Helm"
|
||||
_cmd_helmprom() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
sudo -u docker -H helm repo add prometheus-community https://prometheus-community.github.io/helm-charts/
|
||||
sudo -u docker -H helm install prometheus prometheus-community/prometheus \
|
||||
--namespace kube-system \
|
||||
sudo -u $USER_LOGIN -H helm upgrade --install prometheus prometheus \
|
||||
--repo https://prometheus-community.github.io/helm-charts/ \
|
||||
--namespace prometheus --create-namespace \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
--set server.persistentVolume.enabled=false \
|
||||
@@ -769,6 +970,30 @@ _cmd_passwords() {
|
||||
info "Done."
|
||||
}
|
||||
|
||||
_cmd wait "Wait until VMs are ready (reachable and cloud init is done)"
|
||||
_cmd_wait() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# Wait until all hosts are reachable.
|
||||
info "Trying to reach $TAG instances..."
|
||||
while ! pssh -t 5 true 2>&1 >/dev/null; do
|
||||
>/dev/stderr echo -n "."
|
||||
sleep 2
|
||||
done
|
||||
>/dev/stderr echo ""
|
||||
|
||||
# If this VM image is using cloud-init,
|
||||
# wait for cloud-init to be done
|
||||
info "Waiting for cloud-init to be done on $TAG instances..."
|
||||
pssh "
|
||||
if [ -d /var/lib/cloud ]; then
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi"
|
||||
}
|
||||
|
||||
# Sometimes, weave fails to come up on some nodes.
|
||||
# Symptom: the pods on a node are unreachable (they don't even ping).
|
||||
# Remedy: wipe out Weave state and delete weave pod on that node.
|
||||
@@ -855,16 +1080,12 @@ pull_tag() {
|
||||
google/cadvisor \
|
||||
dockersamples/visualizer \
|
||||
nathanleclaire/redisonrails; do
|
||||
sudo -u docker docker pull $I
|
||||
sudo docker pull $I
|
||||
done'
|
||||
|
||||
info "Finished pulling images for $TAG."
|
||||
}
|
||||
|
||||
tag_is_reachable() {
|
||||
pssh -t 5 true 2>&1 >/dev/null
|
||||
}
|
||||
|
||||
test_tag() {
|
||||
ips_file=tags/$TAG/ips.txt
|
||||
info "Picking a random IP address in $ips_file to run tests."
|
||||
|
||||
@@ -217,7 +217,7 @@ aws_tag_instances() {
|
||||
|
||||
aws_get_ami() {
|
||||
##VERSION##
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a ${AWS_ARCHITECTURE-amd64} -v 18.04 -t hvm:ebs -N -q
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a ${ARCHITECTURE-amd64} -v 18.04 -t hvm:ebs -N -q
|
||||
}
|
||||
|
||||
aws_greet() {
|
||||
|
||||
@@ -14,9 +14,9 @@ infra_start() {
|
||||
die "Aborting."
|
||||
fi
|
||||
echo prefix = \"$TAG\" >> terraform.tfvars
|
||||
echo count = \"$COUNT\" >> terraform.tfvars
|
||||
echo how_many_nodes = \"$COUNT\" >> terraform.tfvars
|
||||
terraform apply -auto-approve
|
||||
terraform output ip_addresses > ips.txt
|
||||
terraform output -raw ip_addresses > ips.txt
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
import time
|
||||
import urllib
|
||||
import yaml
|
||||
|
||||
#################################
|
||||
|
||||
config = yaml.load(open("/tmp/settings.yaml"))
|
||||
COMPOSE_VERSION = config["compose_version"]
|
||||
MACHINE_VERSION = config["machine_version"]
|
||||
CLUSTER_SIZE = config["clustersize"]
|
||||
CLUSTER_PREFIX = config["clusterprefix"]
|
||||
ENGINE_VERSION = config["engine_version"]
|
||||
DOCKER_USER_PASSWORD = config["docker_user_password"]
|
||||
|
||||
#################################
|
||||
|
||||
# This script will be run as ubuntu user, which has root privileges.
|
||||
# docker commands will require sudo because the ubuntu user has no access to the docker socket.
|
||||
|
||||
STEP = 0
|
||||
START = time.time()
|
||||
|
||||
def bold(msg):
|
||||
return "{} {} {}".format("$(tput smso)", msg, "$(tput rmso)")
|
||||
|
||||
def system(cmd):
|
||||
global STEP
|
||||
with open("/tmp/pp.status", "a") as f:
|
||||
t1 = time.time()
|
||||
f.write(bold("--- RUNNING [step {}] ---> {}...".format(STEP, cmd)))
|
||||
retcode = os.system(cmd)
|
||||
t2 = time.time()
|
||||
td = str(t2-t1)[:5]
|
||||
f.write(bold("[{}] in {}s\n".format(retcode, td)))
|
||||
STEP += 1
|
||||
with open(os.environ["HOME"] + "/.bash_history", "a") as f:
|
||||
f.write("{}\n".format(cmd))
|
||||
if retcode != 0:
|
||||
msg = "The following command failed with exit code {}:\n".format(retcode)
|
||||
msg+= cmd
|
||||
raise(Exception(msg))
|
||||
|
||||
|
||||
# On EC2, the ephemeral disk might be mounted on /mnt.
|
||||
# If /mnt is a mountpoint, place Docker workspace on it.
|
||||
system("if mountpoint -q /mnt; then sudo mkdir -p /mnt/docker && sudo ln -sfn /mnt/docker /var/lib/docker; fi")
|
||||
|
||||
# Put our public IP in /tmp/ipv4
|
||||
# ipv4_retrieval_endpoint = "http://169.254.169.254/latest/meta-data/public-ipv4"
|
||||
ipv4_retrieval_endpoint = "http://myip.enix.org/REMOTE_ADDR"
|
||||
system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))
|
||||
|
||||
ipv4 = open("/tmp/ipv4").read()
|
||||
|
||||
# Add a "docker" user with password coming from the settings
|
||||
system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
|
||||
system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))
|
||||
|
||||
# Fancy prompt courtesy of @soulshake.
|
||||
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
|
||||
export PS1='\e[1m\e[31m[{}] \e[32m(\\$(docker-prompt)) \e[34m\u@\h\e[35m \w\e[0m\n$ '
|
||||
SQRL""".format(ipv4))
|
||||
|
||||
# Bigger history, in a different file, and saved before executing each command
|
||||
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
|
||||
export HISTSIZE=9999
|
||||
export HISTFILESIZE=9999
|
||||
shopt -s histappend
|
||||
trap 'history -a' DEBUG
|
||||
export HISTFILE=~/.history
|
||||
SQRL""")
|
||||
|
||||
# Custom .vimrc
|
||||
system("""sudo -u docker tee /home/docker/.vimrc <<SQRL
|
||||
syntax on
|
||||
set autoindent
|
||||
set expandtab
|
||||
set number
|
||||
set shiftwidth=2
|
||||
set softtabstop=2
|
||||
set nowrap
|
||||
SQRL""")
|
||||
|
||||
# Custom .tmux.conf
|
||||
system(
|
||||
"""sudo -u docker tee /home/docker/.tmux.conf <<SQRL
|
||||
bind h select-pane -L
|
||||
bind j select-pane -D
|
||||
bind k select-pane -U
|
||||
bind l select-pane -R
|
||||
|
||||
# Allow using mouse to switch panes
|
||||
set -g mouse on
|
||||
|
||||
# Make scrolling with wheels work
|
||||
|
||||
bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
|
||||
bind -n WheelDownPane select-pane -t= \; send-keys -M
|
||||
|
||||
SQRL"""
|
||||
)
|
||||
|
||||
|
||||
# add docker user to sudoers and allow password authentication
|
||||
system("""sudo tee /etc/sudoers.d/docker <<SQRL
|
||||
docker ALL=(ALL) NOPASSWD:ALL
|
||||
SQRL""")
|
||||
|
||||
system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config")
|
||||
|
||||
system("sudo service ssh restart")
|
||||
system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install git jid jq")
|
||||
system("sudo apt-get -qy install emacs-nox joe")
|
||||
|
||||
#######################
|
||||
### DOCKER INSTALLS ###
|
||||
#######################
|
||||
|
||||
# This will install the latest Docker.
|
||||
#system("curl --silent https://{}/ | grep -v '( set -x; sleep 20 )' | sudo sh".format(ENGINE_VERSION))
|
||||
system("sudo apt-get -qy install apt-transport-https ca-certificates curl software-properties-common")
|
||||
system("curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -")
|
||||
system("sudo add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial {}'".format(ENGINE_VERSION))
|
||||
system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install docker-ce")
|
||||
|
||||
### Install docker-compose
|
||||
system("sudo curl -sSL -o /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/{}/docker-compose-{}-{}".format(COMPOSE_VERSION, platform.system(), platform.machine()))
|
||||
system("sudo chmod +x /usr/local/bin/docker-compose")
|
||||
system("docker-compose version")
|
||||
|
||||
### Install docker-machine
|
||||
system("sudo curl -sSL -o /usr/local/bin/docker-machine https://github.com/docker/machine/releases/download/v{}/docker-machine-{}-{}".format(MACHINE_VERSION, platform.system(), platform.machine()))
|
||||
system("sudo chmod +x /usr/local/bin/docker-machine")
|
||||
system("docker-machine version")
|
||||
|
||||
system("sudo apt-get remove -y --purge dnsmasq-base")
|
||||
system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh tree")
|
||||
|
||||
### Wait for Docker to be up.
|
||||
### (If we don't do this, Docker will not be responsive during the next step.)
|
||||
system("while ! sudo -u docker docker version ; do sleep 2; done")
|
||||
|
||||
### BEGIN CLUSTERING ###
|
||||
|
||||
addresses = list(l.strip() for l in sys.stdin)
|
||||
|
||||
assert ipv4 in addresses
|
||||
|
||||
def makenames(addrs):
|
||||
return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]
|
||||
|
||||
while addresses:
|
||||
cluster = addresses[:CLUSTER_SIZE]
|
||||
addresses = addresses[CLUSTER_SIZE:]
|
||||
if ipv4 not in cluster:
|
||||
continue
|
||||
names = makenames(cluster)
|
||||
for ipaddr, name in zip(cluster, names):
|
||||
system("grep ^{} /etc/hosts || echo {} {} | sudo tee -a /etc/hosts"
|
||||
.format(ipaddr, ipaddr, name))
|
||||
print(cluster)
|
||||
|
||||
mynode = cluster.index(ipv4) + 1
|
||||
system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
|
||||
system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
|
||||
system("sudo -u docker mkdir -p /home/docker/.ssh")
|
||||
system("sudo -u docker touch /home/docker/.ssh/authorized_keys")
|
||||
|
||||
# Create a convenience file to easily check if we're the first node
|
||||
if ipv4 == cluster[0]:
|
||||
system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
|
||||
# On the first node, if we don't have a private key, generate one (with empty passphrase)
|
||||
system("sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || sudo -u docker ssh-keygen -t rsa -f /home/docker/.ssh/id_rsa -P ''")
|
||||
else:
|
||||
system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
|
||||
# Record the IPV4 and name of the first node
|
||||
system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
|
||||
system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))
|
||||
|
||||
FINISH = time.time()
|
||||
duration = "Initial deployment took {}s".format(str(FINISH - START)[:5])
|
||||
system("echo {}".format(duration))
|
||||
@@ -26,6 +26,7 @@ pssh() {
|
||||
|
||||
$PSSH -h $HOSTFILE -l $LOGIN \
|
||||
--par 100 \
|
||||
--timeout 300 \
|
||||
-O LogLevel=ERROR \
|
||||
-O UserKnownHostsFile=/dev/null \
|
||||
-O StrictHostKeyChecking=no \
|
||||
|
||||
@@ -30,21 +30,26 @@ domain_or_domain_file = sys.argv[1]
|
||||
if os.path.isfile(domain_or_domain_file):
|
||||
domains = open(domain_or_domain_file).read().split()
|
||||
domains = [ d for d in domains if not d.startswith('#') ]
|
||||
tag = sys.argv[2]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
ips_file_or_tag = sys.argv[2]
|
||||
if os.path.isfile(ips_file_or_tag):
|
||||
lines = open(ips_file_or_tag).read().split('\n')
|
||||
clusters = [line.split() for line in lines]
|
||||
else:
|
||||
ips = open(f"tags/{ips_file_or_tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
clusters = []
|
||||
while ips:
|
||||
clusters.append(ips[:clustersize])
|
||||
ips = ips[clustersize:]
|
||||
else:
|
||||
domains = [domain_or_domain_file]
|
||||
ips = sys.argv[2:]
|
||||
clustersize = len(ips)
|
||||
clusters = [sys.argv[2:]]
|
||||
|
||||
# Now, do the work.
|
||||
while domains and ips:
|
||||
domain = domains[0]
|
||||
domains = domains[1:]
|
||||
cluster = ips[:clustersize]
|
||||
ips = ips[clustersize:]
|
||||
while domains and clusters:
|
||||
domain = domains.pop(0)
|
||||
cluster = clusters.pop(0)
|
||||
print(f"{domain} => {cluster}")
|
||||
zone = ""
|
||||
node = 0
|
||||
@@ -67,5 +72,5 @@ while domains and ips:
|
||||
if domains:
|
||||
print(f"Good, we have {len(domains)} domains left.")
|
||||
|
||||
if ips:
|
||||
print(f"Crap, we have {len(ips)} IP addresses left.")
|
||||
if clusters:
|
||||
print(f"Crap, we have {len(clusters)} clusters left.")
|
||||
|
||||
@@ -10,14 +10,22 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: k8s
|
||||
user_password: training
|
||||
|
||||
image:
|
||||
|
||||
steps:
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- disabledocker
|
||||
- createuser
|
||||
- webssh
|
||||
- tailhist
|
||||
- kubebins
|
||||
- kubetools
|
||||
- cards
|
||||
- ips
|
||||
|
||||
@@ -10,15 +10,23 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: k8s
|
||||
user_password: training
|
||||
|
||||
clusternumber: 100
|
||||
image:
|
||||
|
||||
steps:
|
||||
- disableaddrchecks
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- createuser
|
||||
- webssh
|
||||
- tailhist
|
||||
- kubebins
|
||||
- kubetools
|
||||
- cards
|
||||
- ips
|
||||
|
||||
@@ -10,15 +10,23 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: k8s
|
||||
user_password: training
|
||||
|
||||
clusternumber: 200
|
||||
image:
|
||||
|
||||
steps:
|
||||
- disableaddrchecks
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- createuser
|
||||
- webssh
|
||||
- tailhist
|
||||
- kubebins
|
||||
- kubetools
|
||||
- cards
|
||||
- ips
|
||||
|
||||
@@ -10,14 +10,22 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: k8s
|
||||
user_password: training
|
||||
|
||||
image:
|
||||
|
||||
steps:
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- createuser
|
||||
- webssh
|
||||
- tailhist
|
||||
- kube
|
||||
- kubetools
|
||||
- kubetest
|
||||
- cards
|
||||
- ips
|
||||
|
||||
@@ -12,18 +12,17 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.25.4
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: docker
|
||||
user_password: training
|
||||
|
||||
steps:
|
||||
- deploy
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- createuser
|
||||
- webssh
|
||||
- tailhist
|
||||
- cards
|
||||
- ips
|
||||
|
||||
@@ -12,12 +12,6 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.13.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: docker
|
||||
user_password: training
|
||||
|
||||
@@ -10,21 +10,20 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.25.4
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: k8s
|
||||
user_password: training
|
||||
|
||||
steps:
|
||||
- deploy
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- createuser
|
||||
- webssh
|
||||
- tailhist
|
||||
- kube
|
||||
- kubetools
|
||||
- cards
|
||||
- kubetest
|
||||
- cards
|
||||
- ips
|
||||
|
||||
25
prepare-vms/settings/portal.yaml
Normal file
25
prepare-vms/settings/portal.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
# This file is passed by trainer-cli to scripts/ips-txt-to-html.py
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 1
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: CHANGEME
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# Login and password that students will use
|
||||
user_login: portal
|
||||
user_password: CHANGEME
|
||||
|
||||
steps:
|
||||
- wait
|
||||
- clusterize
|
||||
- tools
|
||||
- docker
|
||||
- createuser
|
||||
- ips
|
||||
@@ -12,12 +12,6 @@ cards_template: cards.html
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
# Login and password that students will use
|
||||
user_login: docker
|
||||
user_password: training
|
||||
|
||||
@@ -1,24 +1,9 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
retry () {
|
||||
N=$1
|
||||
I=0
|
||||
shift
|
||||
|
||||
while ! "$@"; do
|
||||
I=$(($I+1))
|
||||
if [ $I -gt $N ]; then
|
||||
echo "FAILED, ABORTING"
|
||||
exit 1
|
||||
fi
|
||||
echo "FAILED, RETRYING ($I/$N)"
|
||||
done
|
||||
}
|
||||
|
||||
export AWS_INSTANCE_TYPE=t3a.small
|
||||
|
||||
INFRA=infra/aws-eu-west-3
|
||||
INFRA=infra/aws-us-east-2
|
||||
|
||||
STUDENTS=2
|
||||
|
||||
@@ -32,13 +17,6 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kubenet
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl start \
|
||||
@@ -47,13 +25,6 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kuberouter
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl start \
|
||||
@@ -62,13 +33,6 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
#INFRA=infra/aws-us-west-1
|
||||
|
||||
export AWS_INSTANCE_TYPE=t3a.medium
|
||||
@@ -80,8 +44,3 @@ TAG=$PREFIX-$SETTINGS
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.19.11
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
|
||||
@@ -230,9 +230,9 @@ function scale() {
|
||||
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
|
||||
{% endif %}
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td class="logpass">{{ user_login }}</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
<tr><td class="logpass">{{ user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
resource "openstack_compute_keypair_v2" "ssh_deploy_key" {
|
||||
name = "${var.prefix}"
|
||||
public_key = "${file("~/.ssh/id_rsa.pub")}"
|
||||
name = var.prefix
|
||||
public_key = file("~/.ssh/id_rsa.pub")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,32 +1,34 @@
|
||||
resource "openstack_compute_instance_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
|
||||
image_name = "Ubuntu 18.04.4 20200324"
|
||||
flavor_name = "${var.flavor}"
|
||||
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
|
||||
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"
|
||||
count = var.how_many_nodes
|
||||
name = format("%s-%04d", var.prefix, count.index+1)
|
||||
image_name = var.image
|
||||
flavor_name = var.flavor
|
||||
security_groups = [openstack_networking_secgroup_v2.full_access.name]
|
||||
key_pair = openstack_compute_keypair_v2.ssh_deploy_key.name
|
||||
|
||||
network {
|
||||
name = "${openstack_networking_network_v2.internal.name}"
|
||||
fixed_ip_v4 = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
|
||||
name = openstack_networking_network_v2.internal.name
|
||||
fixed_ip_v4 = cidrhost(openstack_networking_subnet_v2.internal.cidr, count.index+10)
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
count = var.how_many_nodes
|
||||
# This is something provided to us by Enix when our tenant was provisioned.
|
||||
pool = "Public Floating"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
floating_ip = "${openstack_compute_floatingip_v2.machine.*.address[count.index]}"
|
||||
instance_id = "${openstack_compute_instance_v2.machine.*.id[count.index]}"
|
||||
fixed_ip = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
|
||||
count = var.how_many_nodes
|
||||
floating_ip = openstack_compute_floatingip_v2.machine.*.address[count.index]
|
||||
instance_id = openstack_compute_instance_v2.machine.*.id[count.index]
|
||||
fixed_ip = cidrhost(openstack_networking_subnet_v2.internal.cidr, count.index+10)
|
||||
}
|
||||
|
||||
output "ip_addresses" {
|
||||
value = "${join("\n", openstack_compute_floatingip_v2.machine.*.address)}"
|
||||
value = join("", formatlist("%s\n", openstack_compute_floatingip_v2.machine.*.address))
|
||||
}
|
||||
|
||||
variable "flavor" {}
|
||||
|
||||
variable "image" {}
|
||||
|
||||
@@ -1,23 +1,23 @@
|
||||
resource "openstack_networking_network_v2" "internal" {
|
||||
name = "${var.prefix}"
|
||||
name = var.prefix
|
||||
}
|
||||
|
||||
resource "openstack_networking_subnet_v2" "internal" {
|
||||
name = "${var.prefix}"
|
||||
network_id = "${openstack_networking_network_v2.internal.id}"
|
||||
name = var.prefix
|
||||
network_id = openstack_networking_network_v2.internal.id
|
||||
cidr = "10.10.0.0/16"
|
||||
ip_version = 4
|
||||
dns_nameservers = ["1.1.1.1"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_v2" "router" {
|
||||
name = "${var.prefix}"
|
||||
name = var.prefix
|
||||
external_network_id = "15f0c299-1f50-42a6-9aff-63ea5b75f3fc"
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_interface_v2" "router_internal" {
|
||||
router_id = "${openstack_networking_router_v2.router.id}"
|
||||
subnet_id = "${openstack_networking_subnet_v2.internal.id}"
|
||||
router_id = openstack_networking_router_v2.router.id
|
||||
subnet_id = openstack_networking_subnet_v2.internal.id
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
terraform {
|
||||
required_version = ">= 1"
|
||||
required_providers {
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
version = "~> 1.45.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "openstack" {
|
||||
user_name = "${var.user}"
|
||||
tenant_name = "${var.tenant}"
|
||||
domain_name = "${var.domain}"
|
||||
password = "${var.password}"
|
||||
auth_url = "${var.auth_url}"
|
||||
user_name = var.user
|
||||
tenant_name = var.tenant
|
||||
domain_name = var.domain
|
||||
password = var.password
|
||||
auth_url = var.auth_url
|
||||
}
|
||||
|
||||
variable "user" {}
|
||||
|
||||
@@ -7,6 +7,6 @@ resource "openstack_networking_secgroup_rule_v2" "full_access" {
|
||||
ethertype = "IPv4"
|
||||
protocol = ""
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = "${openstack_networking_secgroup_v2.full_access.id}"
|
||||
security_group_id = openstack_networking_secgroup_v2.full_access.id
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
variable "prefix" {
|
||||
type = "string"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "count" {
|
||||
type = "string"
|
||||
variable "how_many_nodes" {
|
||||
type = number
|
||||
}
|
||||
|
||||
|
||||
@@ -15,13 +15,14 @@ for lib in lib/*.sh; do
|
||||
done
|
||||
|
||||
DEPENDENCIES="
|
||||
ssh
|
||||
curl
|
||||
fping
|
||||
jq
|
||||
pssh
|
||||
wkhtmltopdf
|
||||
man
|
||||
pssh
|
||||
ssh
|
||||
wkhtmltopdf
|
||||
yq
|
||||
"
|
||||
|
||||
# Check for missing dependencies, and issue a warning if necessary.
|
||||
|
||||
56
slides/1.yml
Normal file
56
slides/1.yml
Normal file
@@ -0,0 +1,56 @@
|
||||
title: |
|
||||
Docker & Kubernetes
|
||||
Part 1 - Docker
|
||||
|
||||
chat: "[Teams](https://teams.microsoft.com/l/channel/19%3arctk01XQVWxbj6pjTtJDfVd0_QOzfzYe7Xt8VDpl9681%40thread.tacv2/General?groupId=89c621d8-7080-447f-a7eb-9d6704776dd5&tenantId=72aa0d83-624a-4ebf-a683-1b9b45548610)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-11-derivco.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- # DAY 2
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Getting_Inside.md
|
||||
-
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- shared/thankyou.md
|
||||
- # EXTRA
|
||||
- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Network_Drivers.md
|
||||
@@ -2,7 +2,7 @@
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /kube.yml.html 200!
|
||||
/ /1.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
80
slides/autopilot/package-lock.json
generated
80
slides/autopilot/package-lock.json
generated
@@ -321,9 +321,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/forwarded": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
|
||||
"integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=",
|
||||
"version": "0.2.0",
|
||||
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
|
||||
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
@@ -431,19 +431,19 @@
|
||||
}
|
||||
},
|
||||
"node_modules/mime-db": {
|
||||
"version": "1.47.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz",
|
||||
"integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==",
|
||||
"version": "1.48.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz",
|
||||
"integrity": "sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-types": {
|
||||
"version": "2.1.30",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz",
|
||||
"integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==",
|
||||
"version": "2.1.31",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz",
|
||||
"integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==",
|
||||
"dependencies": {
|
||||
"mime-db": "1.47.0"
|
||||
"mime-db": "1.48.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
@@ -497,11 +497,11 @@
|
||||
"integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
|
||||
},
|
||||
"node_modules/proxy-addr": {
|
||||
"version": "2.0.6",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz",
|
||||
"integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==",
|
||||
"version": "2.0.7",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
||||
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
|
||||
"dependencies": {
|
||||
"forwarded": "~0.1.2",
|
||||
"forwarded": "0.2.0",
|
||||
"ipaddr.js": "1.9.1"
|
||||
},
|
||||
"engines": {
|
||||
@@ -750,9 +750,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/ws": {
|
||||
"version": "7.4.5",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.5.tgz",
|
||||
"integrity": "sha512-xzyu3hFvomRfXKH8vOFMU3OguG6oOvhXMo3xsGy3xWExqaM2dxBbVxuD99O7m3ZUFMvvscsZDqxfgMaRr/Nr1g==",
|
||||
"version": "7.4.6",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.6.tgz",
|
||||
"integrity": "sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A==",
|
||||
"engines": {
|
||||
"node": ">=8.3.0"
|
||||
},
|
||||
@@ -770,9 +770,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/xmlhttprequest-ssl": {
|
||||
"version": "1.6.2",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.2.tgz",
|
||||
"integrity": "sha512-tYOaldF/0BLfKuoA39QMwD4j2m8lq4DIncqj1yuNELX4vz9+z/ieG/vwmctjJce+boFHXstqhWnHSxc4W8f4qg==",
|
||||
"version": "1.6.3",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.3.tgz",
|
||||
"integrity": "sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q==",
|
||||
"engines": {
|
||||
"node": ">=0.4.0"
|
||||
}
|
||||
@@ -1051,9 +1051,9 @@
|
||||
}
|
||||
},
|
||||
"forwarded": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
|
||||
"integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
|
||||
"version": "0.2.0",
|
||||
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
|
||||
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="
|
||||
},
|
||||
"fresh": {
|
||||
"version": "0.5.2",
|
||||
@@ -1134,16 +1134,16 @@
|
||||
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
|
||||
},
|
||||
"mime-db": {
|
||||
"version": "1.47.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz",
|
||||
"integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw=="
|
||||
"version": "1.48.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz",
|
||||
"integrity": "sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ=="
|
||||
},
|
||||
"mime-types": {
|
||||
"version": "2.1.30",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz",
|
||||
"integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==",
|
||||
"version": "2.1.31",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz",
|
||||
"integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==",
|
||||
"requires": {
|
||||
"mime-db": "1.47.0"
|
||||
"mime-db": "1.48.0"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
@@ -1185,11 +1185,11 @@
|
||||
"integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
|
||||
},
|
||||
"proxy-addr": {
|
||||
"version": "2.0.6",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz",
|
||||
"integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==",
|
||||
"version": "2.0.7",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
||||
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
|
||||
"requires": {
|
||||
"forwarded": "~0.1.2",
|
||||
"forwarded": "0.2.0",
|
||||
"ipaddr.js": "1.9.1"
|
||||
}
|
||||
},
|
||||
@@ -1408,15 +1408,15 @@
|
||||
"integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
|
||||
},
|
||||
"ws": {
|
||||
"version": "7.4.5",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.5.tgz",
|
||||
"integrity": "sha512-xzyu3hFvomRfXKH8vOFMU3OguG6oOvhXMo3xsGy3xWExqaM2dxBbVxuD99O7m3ZUFMvvscsZDqxfgMaRr/Nr1g==",
|
||||
"version": "7.4.6",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.6.tgz",
|
||||
"integrity": "sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A==",
|
||||
"requires": {}
|
||||
},
|
||||
"xmlhttprequest-ssl": {
|
||||
"version": "1.6.2",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.2.tgz",
|
||||
"integrity": "sha512-tYOaldF/0BLfKuoA39QMwD4j2m8lq4DIncqj1yuNELX4vz9+z/ieG/vwmctjJce+boFHXstqhWnHSxc4W8f4qg=="
|
||||
"version": "1.6.3",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.3.tgz",
|
||||
"integrity": "sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q=="
|
||||
},
|
||||
"yeast": {
|
||||
"version": "0.1.2",
|
||||
|
||||
@@ -119,25 +119,19 @@ New networks can be created (with `docker network create`).
|
||||
|
||||
class: pic
|
||||
|
||||
## Single container in a Docker network
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Two containers on a single Docker network
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Two containers on two Docker networks
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -337,3 +337,9 @@ class: extra-details
|
||||
<br/>
|
||||
data should always be stored in volumes anyway!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Copy-on-write filesystems
|
||||
:EN:- Docker graph drivers
|
||||
:FR:- Les systèmes de fichiers "copy-on-write"
|
||||
:FR:- Les "graph drivers" de Docker
|
||||
|
||||
@@ -1122,3 +1122,12 @@ See `man capabilities` for the full list and details.
|
||||
- LSMs add a layer of access control to all process operations.
|
||||
|
||||
- Container engines take care of this so you don't have to.
|
||||
|
||||
???
|
||||
|
||||
:EN:Containers internals
|
||||
:EN:- Linux kernel namespaces
|
||||
:EN:- Control groups (cgroups)
|
||||
:FR:Fonctionnement interne des conteneurs
|
||||
:FR:- Les namespaces du noyau Linux
|
||||
:FR:- Les "control groups" (cgroups)
|
||||
|
||||
@@ -436,3 +436,7 @@ It depends on:
|
||||
|
||||
- false, if we focus on what matters.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Orchestration overview
|
||||
:FR:- Survol de techniques d'orchestration
|
||||
|
||||
@@ -122,3 +122,8 @@ There might be a long pause before the first layer is pulled,
|
||||
because the API behind `docker pull` doesn't allow to stream build logs, and there is no feedback during the build.
|
||||
|
||||
It is possible to view the build logs by setting up an account on [ctr.run](https://ctr.run/).
|
||||
|
||||
???
|
||||
|
||||
:EN:- Publishing images to the Docker Hub
|
||||
:FR:- Publier des images sur le Docker Hub
|
||||
|
||||
@@ -159,6 +159,24 @@ Volumes are not anchored to a specific path.
|
||||
|
||||
---
|
||||
|
||||
## Populating volumes
|
||||
|
||||
* When an empty volume is mounted on a non-empty directory, the directory is copied to the volume.
|
||||
|
||||
* This makes it easy to "promote" a normal directory to a volume.
|
||||
|
||||
* Non-empty volumes are always mounted as-is.
|
||||
|
||||
Let's populate the webapps volume with the webapps.dist directory from the Tomcat image.
|
||||
|
||||
````bash
|
||||
$ docker run -v webapps:/usr/local/tomcat/webapps.dist tomcat true
|
||||
```
|
||||
|
||||
Note: running `true` will cause the container to exit successfully once the `webapps.dist` directory has been copied to the `webapps` volume, instead of starting tomcat.
|
||||
|
||||
---
|
||||
|
||||
## Using our named volumes
|
||||
|
||||
* Volumes are used with the `-v` option.
|
||||
|
||||
11
slides/exercises/healthchecks-brief.md
Normal file
11
slides/exercises/healthchecks-brief.md
Normal file
@@ -0,0 +1,11 @@
|
||||
## Exercise — Healthchecks
|
||||
|
||||
- Add readiness and liveness probes to a web service
|
||||
|
||||
(we will use the `rng` service in the dockercoins app)
|
||||
|
||||
- Observe the correct behavior of the readiness probe
|
||||
|
||||
(when deploying e.g. an invalid image)
|
||||
|
||||
- Observe the behavior of the liveness probe
|
||||
37
slides/exercises/healthchecks-details.md
Normal file
37
slides/exercises/healthchecks-details.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# Exercise — Healthchecks
|
||||
|
||||
- We want to add healthchecks to the `rng` service in dockercoins
|
||||
|
||||
- First, deploy a new copy of dockercoins
|
||||
|
||||
- Then, add a readiness probe on the `rng` service
|
||||
|
||||
(using a simple HTTP check on the `/` route of the service)
|
||||
|
||||
- Check what happens when deploying an invalid image for `rng` (e.g. `alpine`)
|
||||
|
||||
- Then roll back `rng` to the original image and add a liveness probe
|
||||
|
||||
(with the same parameters)
|
||||
|
||||
- Scale up the `worker` service (to 15+ workers) and observe
|
||||
|
||||
- What happens, and how can we improve the situation?
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
- *Before* adding the readiness probe:
|
||||
|
||||
updating the image of the `rng` service with `alpine` should break it
|
||||
|
||||
- *After* adding the readiness probe:
|
||||
|
||||
updating the image of the `rng` service with `alpine` shouldn't break it
|
||||
|
||||
- When adding the liveness probe, nothing special should happen
|
||||
|
||||
- Scaling the `worker` service will then cause disruptions
|
||||
|
||||
- The final goal is to understand why, and how to fix it
|
||||
13
slides/exercises/helm-generic-chart-brief.md
Normal file
13
slides/exercises/helm-generic-chart-brief.md
Normal file
@@ -0,0 +1,13 @@
|
||||
## Exercise — Helm Charts
|
||||
|
||||
- Create a Helm chart to deploy a generic microservice
|
||||
|
||||
- Deploy dockercoins by instanciating that chart multiple times
|
||||
|
||||
- Bonus: have as little values as possible
|
||||
|
||||
- Bonus: handle healthchecks for HTTP services
|
||||
|
||||
- Bonus: make it easy to change image versions
|
||||
|
||||
- Bonus: make it easy to use images on a different registry
|
||||
86
slides/exercises/helm-generic-chart-details.md
Normal file
86
slides/exercises/helm-generic-chart-details.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# Exercise — Helm Charts
|
||||
|
||||
- We want to deploy dockercoins with a Helm chart
|
||||
|
||||
- We want to have a "generic chart" and instantiate it 5 times
|
||||
|
||||
(once for each service)
|
||||
|
||||
- We will pass values to the chart to customize it for each component
|
||||
|
||||
(to indicate which image to use, which ports to expose, etc.)
|
||||
|
||||
- We'll use `helm create` as a starting point for our generic chart
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
- Have a directory with the generic chart
|
||||
|
||||
(e.g. `generic-chart`)
|
||||
|
||||
- Have 5 value files
|
||||
|
||||
(e.g. `hasher.yml`, `redis.yml`, `rng.yml`, `webui.yml`, `worker.yml`)
|
||||
|
||||
- Be able to install dockercoins by running 5 times:
|
||||
|
||||
`helm install X ./generic-chart --values=X.yml`
|
||||
|
||||
---
|
||||
|
||||
## Hints
|
||||
|
||||
- There are many little things to tweak in the generic chart
|
||||
|
||||
(service names, port numbers, healthchecks...)
|
||||
|
||||
- Check the training slides if you need a refresher!
|
||||
|
||||
---
|
||||
|
||||
## Bonus 1
|
||||
|
||||
- Minimize the amount of values that have to be set
|
||||
|
||||
- Option 1: no values at all for `rng` and `hasher`
|
||||
|
||||
(default values assume HTTP service listening on port 80)
|
||||
|
||||
- Option 2: no values at all for `worker`
|
||||
|
||||
(default values assume worker container with no service)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Bonus 2
|
||||
|
||||
- Handle healthchecks
|
||||
|
||||
- Make sure that healthchecks are enabled in HTTP services
|
||||
|
||||
- ...But not in Redis or in the worker
|
||||
|
||||
---
|
||||
|
||||
## Bonus 3
|
||||
|
||||
- Make it easy to change image versions
|
||||
|
||||
- E.g. change `v0.1` to `v0.2` by changing only *one* thing in *one* place
|
||||
|
||||
---
|
||||
|
||||
## Bonus 4
|
||||
|
||||
- Make it easy to use images on a different registry
|
||||
|
||||
- We can assume that the images will always have the same names
|
||||
|
||||
(`hasher`, `rng`, `webui`, `worker`)
|
||||
|
||||
- And the same tag
|
||||
|
||||
(`v0.1`)
|
||||
9
slides/exercises/helm-umbrella-chart-brief.md
Normal file
9
slides/exercises/helm-umbrella-chart-brief.md
Normal file
@@ -0,0 +1,9 @@
|
||||
## Exercise — Umbrella Charts
|
||||
|
||||
- Create a Helm chart with dependencies on other charts
|
||||
|
||||
(leveraging the generic chart created earlier)
|
||||
|
||||
- Deploy dockercoins with that chart
|
||||
|
||||
- Bonus: use an external chart for the redis component
|
||||
77
slides/exercises/helm-umbrella-chart-details.md
Normal file
77
slides/exercises/helm-umbrella-chart-details.md
Normal file
@@ -0,0 +1,77 @@
|
||||
# Exercise — Umbrella Charts
|
||||
|
||||
- We want to deploy dockercoins with a single Helm chart
|
||||
|
||||
- That chart will reuse the "generic chart" created previously
|
||||
|
||||
- This will require expressing dependencies, and using the `alias` keyword
|
||||
|
||||
- It will also require minor changes in the templates
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
- We want to be able to install a copy of dockercoins with:
|
||||
```bash
|
||||
helm install dockercoins ./umbrella-chart
|
||||
```
|
||||
|
||||
- It should leverage the generic chart created earlier
|
||||
|
||||
(and instanciate it five times, one time per component of dockercoins)
|
||||
|
||||
- The values YAML files created earlier should be merged in a single one
|
||||
|
||||
---
|
||||
|
||||
## Bonus
|
||||
|
||||
- We want to replace our redis component with a better one
|
||||
|
||||
- We're going to use Bitnami's redis chart
|
||||
|
||||
(find it on the Artifact Hub)
|
||||
|
||||
- However, a lot of adjustments will be required!
|
||||
|
||||
(check following slides if you need hints)
|
||||
|
||||
---
|
||||
|
||||
## Hints (1/2)
|
||||
|
||||
- We will probably have to disable persistence
|
||||
|
||||
- by default, the chart enables persistence
|
||||
|
||||
- this works only if we have a default StorageClass
|
||||
|
||||
- this can be disabled by setting a value
|
||||
|
||||
- We will also have to disable authentication
|
||||
|
||||
- by default, the chart generates a password for Redis
|
||||
|
||||
- the dockercoins code doesn't use one
|
||||
|
||||
- this can also be changed by setting a value
|
||||
|
||||
---
|
||||
|
||||
## Hints (2/2)
|
||||
|
||||
- The dockercoins code connects to `redis`
|
||||
|
||||
- The chart generates different service names
|
||||
|
||||
- Option 1:
|
||||
|
||||
- vendor the chart in our umbrella chart
|
||||
- change the service name in the chart
|
||||
|
||||
- Option 2:
|
||||
|
||||
- add a Service of type ExternalName
|
||||
- it will be a DNS alias from `redis` to `redis-whatever.NAMESPACE.svc.cluster.local`
|
||||
- for extra points, make the domain configurable
|
||||
9
slides/exercises/ingress-brief.md
Normal file
9
slides/exercises/ingress-brief.md
Normal file
@@ -0,0 +1,9 @@
|
||||
## Exercise — Ingress
|
||||
|
||||
- Add an ingress controller to a Kubernetes cluster
|
||||
|
||||
- Create an ingress resource for a web app on that cluster
|
||||
|
||||
- Challenge: accessing/exposing port 80
|
||||
|
||||
(different methods depending on how the cluster was deployed)
|
||||
47
slides/exercises/ingress-details.md
Normal file
47
slides/exercises/ingress-details.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Exercise — Ingress
|
||||
|
||||
- We want to expose a web app through an ingress controller
|
||||
|
||||
- This will require:
|
||||
|
||||
- the web app itself (dockercoins, NGINX, whatever we want)
|
||||
|
||||
- an ingress controller (we suggest Traefik)
|
||||
|
||||
- a domain name (use `\*.nip.io` or `\*.localdev.me`)
|
||||
|
||||
- an ingress resource
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
- We want to be able to access the web app using a URL like:
|
||||
|
||||
http://webapp.localdev.me
|
||||
|
||||
*or*
|
||||
|
||||
http://webapp.A.B.C.D.nip.io
|
||||
|
||||
(where A.B.C.D is the IP address of one of our nodes)
|
||||
|
||||
---
|
||||
|
||||
## Hints
|
||||
|
||||
- Traefik can be installed with Helm
|
||||
|
||||
(it can be found on the Artifact Hub)
|
||||
|
||||
- If using Kubernetes 1.22+, make sure to use Traefik 2.5+
|
||||
|
||||
- If our cluster supports LoadBalancer Services: easy
|
||||
|
||||
(nothing special to do)
|
||||
|
||||
- For local clusters, things can be more difficult; two options:
|
||||
|
||||
- map localhost:80 to e.g. a NodePort service, and use `\*.localdev.me`
|
||||
|
||||
- use hostNetwork, or ExternalIP, and use `\*.nip.io`
|
||||
15
slides/exercises/ingress-secret-policy-brief.md
Normal file
15
slides/exercises/ingress-secret-policy-brief.md
Normal file
@@ -0,0 +1,15 @@
|
||||
## Exercise — Ingress Secret Policy
|
||||
|
||||
*Implement policy to limit impact of ingress controller vulnerabilities.*
|
||||
|
||||
(Mitigate e.g. CVE-2021-25742)
|
||||
|
||||
- Deploy an ingress controller and cert-manager
|
||||
|
||||
- Deploy a trivial web app secured with TLS
|
||||
|
||||
(obtaining a cert with cert-manager + Let's Encrypt)
|
||||
|
||||
- Prevent ingress controller from reading arbitrary secrets
|
||||
|
||||
- Automatically grant selective access to TLS secrets, but not other secrets
|
||||
93
slides/exercises/ingress-secret-policy-details.md
Normal file
93
slides/exercises/ingress-secret-policy-details.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Exercise — Ingress Secret Policy
|
||||
|
||||
- Most ingress controllers have access to all Secrets
|
||||
|
||||
(so that they can access TLS keys and certs, which are stored in Secrets)
|
||||
|
||||
- Ingress controller vulnerability can lead to full cluster compromise
|
||||
|
||||
(by allowing attacker to access all secrets, including API tokens)
|
||||
|
||||
- See for instance [CVE-2021-25742](https://github.com/kubernetes/ingress-nginx/issues/7837)
|
||||
|
||||
- How can we prevent that?
|
||||
|
||||
---
|
||||
|
||||
## Step 1: Ingress Controller
|
||||
|
||||
- Deploy an Ingress Controller
|
||||
|
||||
(e.g. Traefik or NGINX; you can use @@LINK[k8s/traefik-v2.yaml])
|
||||
|
||||
- Create a trivial web app (e.g. NGINX, `jpetazzo/color`...)
|
||||
|
||||
- Expose it with an Ingress
|
||||
|
||||
(e.g. use `app.<ip-address>.nip.io`)
|
||||
|
||||
- Check that you can access it through `http://app.<ip-address>.nip.io`
|
||||
|
||||
---
|
||||
|
||||
## Step 2: cert-manager
|
||||
|
||||
- Deploy cert-manager
|
||||
|
||||
- Create a ClusterIssuer using Let's Encrypt staging environment
|
||||
|
||||
(e.g. with @@LINK[k8s/cm-clusterissuer.yaml])
|
||||
|
||||
- Create an Ingress for the app, with TLS enabled
|
||||
|
||||
(e.g. use `appsecure.<ip-address>.nip.io`)
|
||||
|
||||
- Tell cert-manager to obtain a certificate for that Ingress
|
||||
|
||||
- option 1: manually create a Certificate (e.g. with @@LINK[k8s/cm-certificate.yaml])
|
||||
|
||||
- option 2: use the `cert-manager.io/cluster-issuer` annotation
|
||||
|
||||
- Check that the Let's Encrypt certificate was issued
|
||||
|
||||
---
|
||||
|
||||
## Step 3: RBAC
|
||||
|
||||
- Remove the Ingress Controller's permission to read all Secrets
|
||||
|
||||
- Restart the Ingress Controller
|
||||
|
||||
- Check that https://appsecure doesn't serve the Let's Encrypt cert
|
||||
|
||||
- Grant permission to read the certificate's Secret
|
||||
|
||||
- Check that https://appsecure serves the Let's Encrypt cert again
|
||||
|
||||
---
|
||||
|
||||
## Step 4: Kyverno
|
||||
|
||||
- Install Kyverno
|
||||
|
||||
- Write a Kyverno policy to automatically grant permission to read Secrets
|
||||
|
||||
(e.g. when a cert-manager Certificate is created)
|
||||
|
||||
- Check @@LINK[k8s/kyverno-namespace-setup.yaml] for inspiration
|
||||
|
||||
- Hint: you need to automatically create a Role and RoleBinding
|
||||
|
||||
- Create another app + another Ingress with TLS
|
||||
|
||||
- Check that the Certificate, Secret, Role, RoleBinding are created
|
||||
|
||||
- Check that the new app correctly serves the Let's Encrypt cert
|
||||
|
||||
---
|
||||
|
||||
## Step 5: double-check
|
||||
|
||||
- Check that the Ingress Controller can't access other secrets
|
||||
|
||||
(e.g. by manually creating a Secret and checking with `kubectl exec`?)
|
||||
7
slides/exercises/k8sfundamentals-brief.md
Normal file
7
slides/exercises/k8sfundamentals-brief.md
Normal file
@@ -0,0 +1,7 @@
|
||||
## Exercise — Deploy Dockercoins
|
||||
|
||||
- Deploy the dockercoins application to our Kubernetes cluster
|
||||
|
||||
- Connect components together
|
||||
|
||||
- Expose the web UI and open it in a web browser to check that it works
|
||||
47
slides/exercises/k8sfundamentals-details.md
Normal file
47
slides/exercises/k8sfundamentals-details.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Exercise — Deploy Dockercoins
|
||||
|
||||
- We want to deploy the dockercoins app
|
||||
|
||||
- There are 5 components in the app:
|
||||
|
||||
hasher, redis, rng, webui, worker
|
||||
|
||||
- We'll use one Deployment for each component
|
||||
|
||||
(see next slide for the images to use)
|
||||
|
||||
- We'll connect them with Services
|
||||
|
||||
- We'll check that we can access the web UI in a browser
|
||||
|
||||
---
|
||||
|
||||
## Images
|
||||
|
||||
- hasher → `dockercoins/hasher:v0.1`
|
||||
|
||||
- redis → `redis`
|
||||
|
||||
- rng → `dockercoins/rng:v0.1`
|
||||
|
||||
- webui → `dockercoins/webui:v0.1`
|
||||
|
||||
- worker → `dockercoins/worker:v0.1`
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
- We should be able to see the web UI in our browser
|
||||
|
||||
(with the graph showing approximately 3-4 hashes/second)
|
||||
|
||||
---
|
||||
|
||||
## Hints
|
||||
|
||||
- Make sure to expose services with the right ports
|
||||
|
||||
(check the logs of the worker; they indicate the port numbers)
|
||||
|
||||
- The web UI can be exposed with a NodePort Service
|
||||
9
slides/exercises/localcluster-brief.md
Normal file
9
slides/exercises/localcluster-brief.md
Normal file
@@ -0,0 +1,9 @@
|
||||
## Exercise — Local Cluster
|
||||
|
||||
- Deploy a local Kubernetes cluster if you don't already have one
|
||||
|
||||
- Deploy dockercoins on that cluster
|
||||
|
||||
- Connect to the web UI in your browser
|
||||
|
||||
- Scale up dockercoins
|
||||
43
slides/exercises/localcluster-details.md
Normal file
43
slides/exercises/localcluster-details.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Exercise — Local Cluster
|
||||
|
||||
- We want to have our own local Kubernetes cluster
|
||||
|
||||
(we can use Docker Desktop, KinD, minikube... anything will do!)
|
||||
|
||||
- Then we want to run a copy of dockercoins on that cluster
|
||||
|
||||
- We want to be able to connect to the web UI
|
||||
|
||||
(we can expose the port, or use port-forward, or whatever)
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
- Be able to see the dockercoins web UI running on our local cluster
|
||||
|
||||
---
|
||||
|
||||
## Hints
|
||||
|
||||
- On a Mac or Windows machine:
|
||||
|
||||
the easiest solution is probably Docker Desktop
|
||||
|
||||
- On a Linux machine:
|
||||
|
||||
the easiest solution is probably KinD or k3d
|
||||
|
||||
- To connect to the web UI:
|
||||
|
||||
`kubectl port-forward` is probably the easiest solution
|
||||
|
||||
---
|
||||
|
||||
## Bonus
|
||||
|
||||
- If you already have a local Kubernetes cluster:
|
||||
|
||||
try to run another one!
|
||||
|
||||
- Try to use another method than `kubectl port-forward`
|
||||
13
slides/exercises/sealed-secrets-brief.md
Normal file
13
slides/exercises/sealed-secrets-brief.md
Normal file
@@ -0,0 +1,13 @@
|
||||
## Exercise — Sealed Secrets
|
||||
|
||||
- Install the sealed secrets operator
|
||||
|
||||
- Create a secret, seal it, load it in the cluster
|
||||
|
||||
- Check that sealed secrets are "locked"
|
||||
|
||||
(can't be used with a different name, namespace, or cluster)
|
||||
|
||||
- Bonus: migrate a sealing key to another cluster
|
||||
|
||||
- Set RBAC permissions to grant selective access to secrets
|
||||
117
slides/exercises/sealed-secrets-details.md
Normal file
117
slides/exercises/sealed-secrets-details.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# Exercise — Sealed Secrets
|
||||
|
||||
This is a "combo exercise" to practice the following concepts:
|
||||
|
||||
- Secrets (exposing them in containers)
|
||||
|
||||
- RBAC (granting specific permissions to specific users)
|
||||
|
||||
- Operators (specifically, sealed secrets)
|
||||
|
||||
- Migrations (copying/transferring resources from a cluster to another)
|
||||
|
||||
For this exercise, you will need two clusters.
|
||||
|
||||
(It can be two local clusters.)
|
||||
|
||||
We will call them "dev cluster" and "prod cluster".
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
- For simplicity, our application will be NGINX (or `jpetazzo/color`)
|
||||
|
||||
- Our application needs two secrets:
|
||||
|
||||
- `logging_api_token` (not too sensitive; same in dev and prod)
|
||||
|
||||
- `database_password` (sensitive; different in dev and prod)
|
||||
|
||||
- Secrets can be exposed as env vars, or mounted in volumes
|
||||
|
||||
(it doesn't matter for this exercise)
|
||||
|
||||
- We want to prepare and deploy the application in the dev cluster
|
||||
|
||||
- ...Then deploy it to the prod cluster
|
||||
|
||||
---
|
||||
|
||||
## Step 1 (easy)
|
||||
|
||||
- On the dev cluster, create a Namespace called `dev`
|
||||
|
||||
- Create the two secrets, `logging_api_token` and `database_password`
|
||||
|
||||
(the content doesn't matter; put a random string of your choice)
|
||||
|
||||
- Create a Deployment called `app` using both secrets
|
||||
|
||||
(use a mount or environment variables; whatever you prefer!)
|
||||
|
||||
- Verify that the secrets are available to the Deployment
|
||||
|
||||
(e.g. with `kubectl exec`)
|
||||
|
||||
- Generate YAML manifests for the application (Deployment+Secrets)
|
||||
|
||||
---
|
||||
|
||||
## Step 2 (medium)
|
||||
|
||||
- Deploy the sealed secrets operator on the dev cluster
|
||||
|
||||
- In the YAML, replace the Secrets with SealedSecrets
|
||||
|
||||
- Delete the `dev` Namespace, recreate it, redeploy the app
|
||||
|
||||
(to make sure everything works fine)
|
||||
|
||||
- Create a `staging` Namespace and try to deploy the app
|
||||
|
||||
- If something doesn't work, fix it
|
||||
|
||||
--
|
||||
|
||||
- Hint: set the *scope* of the sealed secrets
|
||||
|
||||
---
|
||||
|
||||
## Step 3 (hard)
|
||||
|
||||
- On the prod cluster, create a Namespace called `prod`
|
||||
|
||||
- Try to deploy the application using the YAML manifests
|
||||
|
||||
- It won't work (the cluster needs the sealing key)
|
||||
|
||||
- Fix it!
|
||||
|
||||
(check the next slides if you need hints)
|
||||
|
||||
--
|
||||
|
||||
- You will have to copy the Sealed Secret private key
|
||||
|
||||
--
|
||||
|
||||
- And restart the operator so that it picks up the key
|
||||
|
||||
---
|
||||
|
||||
## Step 4 (medium)
|
||||
|
||||
Let's say that we have a user called `alice` on the prod cluster.
|
||||
|
||||
(You can use `kubectl --as=alice` to impersonate her.)
|
||||
|
||||
We want Alice to be able to:
|
||||
|
||||
- deploy the whole application in the `prod` namespace
|
||||
|
||||
- access the `logging_api_token` secret
|
||||
|
||||
- but *not* the `database_password` secret
|
||||
|
||||
- view the logs of the app
|
||||
@@ -1,21 +1,20 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="1600"
|
||||
height="900"
|
||||
viewBox="0 0 1600 900"
|
||||
version="1.1"
|
||||
id="svg696"
|
||||
sodipodi:docname="stacked-control-plane.svg"
|
||||
inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
|
||||
enable-background="new">
|
||||
inkscape:version="1.1 (c4e8f9ed74, 2021-05-24)"
|
||||
enable-background="new"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/">
|
||||
<metadata
|
||||
id="metadata700">
|
||||
<rdf:RDF>
|
||||
@@ -37,20 +36,21 @@
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="1916"
|
||||
inkscape:window-height="1032"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1080"
|
||||
id="namedview698"
|
||||
showgrid="false"
|
||||
inkscape:zoom="0.64"
|
||||
inkscape:cx="846.21952"
|
||||
inkscape:cy="490.81599"
|
||||
inkscape:cx="846.09375"
|
||||
inkscape:cy="491.40625"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="18"
|
||||
inkscape:window-y="0"
|
||||
inkscape:window-maximized="0"
|
||||
inkscape:current-layer="g11538"
|
||||
inkscape:current-layer="svg696"
|
||||
units="px"
|
||||
inkscape:snap-object-midpoints="true"
|
||||
inkscape:document-rotation="0" />
|
||||
inkscape:document-rotation="0"
|
||||
inkscape:pagecheckerboard="0" />
|
||||
<title
|
||||
id="title304">how-does-k8s-work</title>
|
||||
<style
|
||||
@@ -814,7 +814,7 @@
|
||||
</marker>
|
||||
<linearGradient
|
||||
id="linearGradient15544"
|
||||
osb:paint="solid">
|
||||
inkscape:swatch="solid">
|
||||
<stop
|
||||
style="stop-color:#f7fe9a;stop-opacity:1;"
|
||||
offset="0"
|
||||
@@ -877,10 +877,10 @@
|
||||
id="path6539" />
|
||||
</marker>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
x="-0.032283688"
|
||||
y="-0.071219566"
|
||||
width="1.0645674"
|
||||
height="1.1697002"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1">
|
||||
<feOffset
|
||||
@@ -911,10 +911,10 @@
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
x="-0.011010454"
|
||||
y="-0.013426012"
|
||||
width="1.0220209"
|
||||
height="1.0324462"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1-3">
|
||||
<feOffset
|
||||
@@ -945,10 +945,10 @@
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
x="-0.042581573"
|
||||
y="-0.041728245"
|
||||
width="1.0851631"
|
||||
height="1.1008433"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1-2">
|
||||
<feOffset
|
||||
@@ -1046,6 +1046,40 @@
|
||||
id="feMergeNode8244" />
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
x="-0.023476223"
|
||||
y="-0.071219566"
|
||||
width="1.0469524"
|
||||
height="1.1617986"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1-36">
|
||||
<feOffset
|
||||
dx="0"
|
||||
dy="2"
|
||||
in="SourceAlpha"
|
||||
result="shadowOffsetOuter1"
|
||||
id="feOffset308-7" />
|
||||
<feGaussianBlur
|
||||
stdDeviation="2"
|
||||
in="shadowOffsetOuter1"
|
||||
result="shadowBlurOuter1"
|
||||
id="feGaussianBlur310-5" />
|
||||
<feColorMatrix
|
||||
values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"
|
||||
type="matrix"
|
||||
in="shadowBlurOuter1"
|
||||
result="shadowMatrixOuter1"
|
||||
id="feColorMatrix312-3" />
|
||||
<feMerge
|
||||
id="feMerge318-5">
|
||||
<feMergeNode
|
||||
in="shadowMatrixOuter1"
|
||||
id="feMergeNode314-6" />
|
||||
<feMergeNode
|
||||
in="SourceGraphic"
|
||||
id="feMergeNode316-2" />
|
||||
</feMerge>
|
||||
</filter>
|
||||
</defs>
|
||||
<text
|
||||
id="text3581"
|
||||
@@ -2042,6 +2076,60 @@
|
||||
d="m 82.16849,58.067121 a 0.82746421,0.82746421 0 0 0 0.842647,-0.827464 V 55.888385 A 0.82746421,0.82746421 0 0 0 82.183672,55.06092 H 77.955254 V 51.098202 A 0.92615261,0.92615261 0 0 0 77.036693,50.179641 H 73.081566 V 45.973997 A 0.82746421,0.82746421 0 0 0 72.254102,45.146533 H 70.90283 a 0.82746421,0.82746421 0 0 0 -0.797098,0.827464 v 4.228418 h -4.478935 v -4.228418 a 0.82746421,0.82746421 0 0 0 -0.835056,-0.827464 h -1.336089 a 0.82746421,0.82746421 0 0 0 -0.827464,0.827464 v 4.228418 h -4.448569 v -4.228418 a 0.82746421,0.82746421 0 0 0 -0.827464,-0.827464 h -1.351272 a 0.82746421,0.82746421 0 0 0 -0.827464,0.827464 v 4.228418 h -3.947536 a 0.92615261,0.92615261 0 0 0 -0.926153,0.895787 v 3.939944 h -4.213235 a 0.82746421,0.82746421 0 0 0 -0.827464,0.827464 v 1.351272 a 0.82746421,0.82746421 0 0 0 0.827464,0.827464 h 4.213235 v 4.440978 h -4.213235 a 0.82746421,0.82746421 0 0 0 -0.827464,0.827464 v 1.374046 a 0.82746421,0.82746421 0 0 0 0.827464,0.835056 h 4.213235 v 4.448566 h -4.213235 a 0.82746421,0.82746421 0 0 0 -0.827464,0.827464 v 1.351272 a 0.82746421,0.82746421 0 0 0 0.827464,0.827464 h 4.213235 v 3.932354 a 0.92615261,0.92615261 0 0 0 0.918561,0.918561 h 3.955128 v 4.228419 a 0.82746421,0.82746421 0 0 0 0.827464,0.827464 h 1.351272 a 0.82746421,0.82746421 0 0 0 0.827464,-0.827464 v -4.228419 h 4.448569 v 4.228419 a 0.82746421,0.82746421 0 0 0 0.827464,0.827464 h 1.336089 a 0.82746421,0.82746421 0 0 0 0.827464,-0.827464 v -4.228419 h 4.486527 v 4.228419 a 0.82746421,0.82746421 0 0 0 0.827464,0.827464 h 1.351272 a 0.82746421,0.82746421 0 0 0 0.827464,-0.827464 v -4.228419 h 3.955127 a 0.92615261,0.92615261 0 0 0 0.888195,-0.918561 v -3.939945 h 4.213236 a 0.82746421,0.82746421 0 0 0 0.842647,-0.827464 V 70.790329 A 0.82746421,0.82746421 0 0 0 82.183672,69.962865 H 77.955254 V 65.52189 h 4.213236 a 0.82746421,0.82746421 0 0 0 0.842647,-0.835056 v -1.34368 A 0.82746421,0.82746421 0 0 0 82.16849,62.485324 H 77.955254 V 58.067121 Z M 
74.842774,74.76823 H 53.404619 V 53.299712 h 21.438155 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
<g
|
||||
id="g3633"
|
||||
inkscape:label="pod-template"
|
||||
transform="matrix(0.6040599,0,0,0.6040599,973.44266,521.02252)">
|
||||
<rect
|
||||
transform="matrix(0.40490214,0,0,0.40490214,626.119,463.75335)"
|
||||
style="display:inline;vector-effect:none;fill:#808080;fill-opacity:1;fill-rule:evenodd;stroke:#020202;stroke-width:5.11522;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal;filter:url(#filter-1-36)"
|
||||
id="rect3619"
|
||||
width="313.40689"
|
||||
height="103.30883"
|
||||
x="428.58087"
|
||||
y="-2.5194976"
|
||||
ry="5.617908"
|
||||
inkscape:label="pod-rectangle" />
|
||||
<g
|
||||
id="g3627"
|
||||
inkscape:label="pod-box"
|
||||
style="display:inline;fill:#808080;fill-rule:evenodd;stroke:none;stroke-width:1"
|
||||
transform="translate(-343.49049,269.47902)">
|
||||
<path
|
||||
id="path3621"
|
||||
d="m 1065.6051,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
inkscape:connector-curvature="0"
|
||||
style="fill:#808080;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
inkscape:export-ydpi="376.57999" />
|
||||
<path
|
||||
id="path3623"
|
||||
d="m 1065.6051,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
inkscape:connector-curvature="0"
|
||||
style="fill:#808080;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
inkscape:export-ydpi="376.57999" />
|
||||
<path
|
||||
id="path3625"
|
||||
d="m 1143.1428,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
inkscape:connector-curvature="0"
|
||||
style="fill:#808080;fill-rule:evenodd;stroke:#000000;stroke-width:2.741;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
inkscape:export-ydpi="376.57999" />
|
||||
</g>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:23.0917px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.590154"
|
||||
x="863.76526"
|
||||
y="492.69156"
|
||||
id="text3631"
|
||||
inkscape:label="pod-label"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:23.0917px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.590154"
|
||||
sodipodi:role="line"
|
||||
id="tspan3629"
|
||||
x="863.76526"
|
||||
y="492.69156">scheduler</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g11538"
|
||||
transform="matrix(0.94136549,0,0,0.94136549,106.56043,286.48052)">
|
||||
|
||||
|
Before Width: | Height: | Size: 234 KiB After Width: | Height: | Size: 238 KiB |
@@ -0,0 +1 @@
|
||||
<mxfile host="app.diagrams.net" modified="2021-10-18T16:31:49.721Z" agent="5.0 (X11)" etag="1Ll0mAFIDKEoyCq5-xJQ" version="15.5.5" type="device"><diagram name="Page-1" id="c37626ed-c26b-45fb-9056-f9ebc6bb27b6">7V1bk6M2Fv411M48mOJ+ebT7kmxtZzOVrs1unrZkkG22MXJA7vbk168kJC4CDHbjS/fYqczAAQTofOd+xCjm3Xr3Uwo2q19QCGPF0MKdYt4rhqHbrk7+opTvOcU3nJywTKOQn1QSnqO/ICdqnLqNQpjVTsQIxTja1IkBShIY4BoNpCl6q5+2QHH9rhuwhA3CcwDiJvXfUYhXnKprWnngZxgtV/zWns0PzEHwskzRNuH3S1AC8yNrIIbhp2YrEKK3Csl8UMy7FCGcb613dzCm0ypmLL/useNo8cgpTPCQC57Af8H0IX580n//6+nPf/784v6+mxhmPswriLd8LvjT4u9ictjrQTqKrpizt1WE4fMGBPToG4EDoa3wOuaHFyjBj2AdxRQJv6E5wohTn9E2ZRetMCZsNWxzSv4gj0v/oCdk6hKhZQzBJsrUAK3ZgSBjpz4u8jHJpjRqjiWXTOcswyl6KRhIsEVOieL4DsUoJQTGG3MG4miZkN2AzBsk9NkGptEakm36VlGyJMfopc355VP+ClMMdxUSn++fICLDpOQhNXHU57znYuHx3bcSY544ZVWBl+NyIuC4XhZDlwwmG5zHh/DbbvD7SwZT8kqKcUefDKUvGQY4QklO+Bapqvq1gQk6BxGRnymfTIw2lamN4QLLWEhPgoW0BQumQ/aXKQgjwjnBfMUwF+zXDoocOhJxBASYrltDgO42IWAZbRA4FQJ05xCJ165W4nVrHyeFEjAavCVACJ25Y1dBch+lxKpQzNNhUnrlGMy37br4W04L870W5hv2qbhvtTDfiamwhtEr2Vxi9uo5ic42NeoAgwndnmRpwC5y/txSqzXL+Uq5SnjawVHGz4KbBS+N2S8oQeVQ1GrnCCtp4lRNPjV/zBAFLzDVxNPyJ5BfYp5+sNf6mI9Zag9xYv7MTFjJI2q6tdk1R9FdQ9VdVVP1HjYeTK7h+SPqN0/Sb7kFM4Jgr0obQW25pqS2TLuhtgrPuKq2Tqa1Bpgs6oRvhr98EUqAuRhB2zsptuTK2S2+nH5eQ96pyk+qOSoqo1/mPSLzTCoH6BlZNRAGYRAl1DPtkHUJAiTA2tDNYDuH/WI9z3XA07wgFKHcr1scRwnk9BCkL7/SyADTadBUza4TDUbV+6U3BNBbtEqvE3hwvmiV3r3y0C/SF0NnM84YzWD06P73jZ9bo+57SJgj84/ruGqNJIaEoXJItY7CMO4Cct2CXdJEjQ5ZETk19avTAmDdPhGA3cvbHNkQX97meDeb86PaHLcD1Vdkc/wjgturNkPm+8Kcm3EaHdtXYpxE3eSS1slzr8066beQ6Ic1T4VEXLF90o0PHRRZt6BoZLtzAGivxfAMqBdX+F4vHV+yUgTDJXzmz0grPGiJEhA/lNQZ3EX4P0xxGTbf/YM/ON2+33FcsZ3vYichs5pfZYvdP6rHysvYnrgue4E4WIkd0RmgtWnCIrVbr2yxl0rCKe1+KAWIUB4jytJ7ufpdTR93egWZ4EMX/3n9CIN0Cfchm+OPzvpgXSxSyimMAY5eYe3J2pDMh/uGIqbcuIj4nuSXyPo8f0l+VSkPjYF0rW+kfBoaIzHJKl7yHcLWVq37JMJ2pNyUQjpcRj+7rPmXkzVXlhDtSFlzHGkg/8yi1pav/iSiVoiMe4xd65ZOu0U86d430U/1g8ifCDsvIYCeJgugfZwANgY6twC25dsryTtNhAZldMK6zBqxyj1cgG1M3+ye9YeQjQRi2tdGtr6U58/TiPKr2P/a3TBy8jQb75f7NMHO4Z1Skvp3vG
bLgWmeNdDpLnV04ZEHrQ1E3omsTcZwA8ikhOzViLbeUqCChE5FeeFkQhBbyxChTd6ReTGEfr6A/P0YtZuxuHXWYFwkr/Y5LSIDGK1ZC3zBvycwh/E3lEW87ZHMGybzSrQqPTArEn9t/bMdzb/ZJm/NX0Q7yvMZu+VUUDVBKdgskq2Mmk3eot16M4GhqQHPmXuBFQDPc13XtH0rMFR2mOdhI/I/cOe+b1rhxDcX3sTSDHviuZ470XQHAs13fDsEVCy8tZ68bCeWt4ALTwsmlgW1iaWbcOK7jj3RfC/wwwXwXXOhbmjz9xhYKZr+Rduv3pIbt3xV91vg4p0q/yj6uCrw6Hcfg236Wni65SlPiDKdEf8HMf7OhQ1smfhVNM0QT7LpSF6lruj1GIX8jZYeGewdvg8XzkVxwfNtN2gcFM2fCRruTWVcBy5E8ehKgCGeu9M/7i01uUa91DQ4wju0piXfiI/yqCn+o+LrytTj296jMntQHgyFWGDvXpxgKLPqyX7l5Lt9J08txXf2uuxHtHQQzkSbDOaJlLy0G6Nt2OF/X76LvWVdntGSrLHpf23hRrMuzH78JhV6/hvJe5Mb4Fu8t6LZve7pn2zdXrMU8BQlW4JrbQ2CFS3N0yW35m6aY6nclAB0oTV5rsz1fevuYik2aQSfcvBSBKG94WoeqoyAEdMpvXex6Hng6i45szYeSPpyaP3lf6dNVUK80miCgus4S/HuzrXQhq8+Oo2mIkdn1eG5QW/mn9tUlhdAll9uqqxiFXHr4sO5Z1v2SOuLfWlRStvq0taVOtbJQsy2/oARIPgWg6SKQVOZOV0YPOx27c7BoW0zraPovqHqjqfqqmWM7QjcJOusktXWeHNmyepZujuw9XAEh/tKmiiP6qD8V5an4UFCU/CbFC1TsKaEdJskFFuGRvPsGl7BqmulBYBSQRBAinmtaMSkO9ssv5BcE9FyU0ThSuJdsvn3b/SqMEzJZZCe+wW+QjpStMgvyKA8WoiSv9FZ3mzncZStFFEe2JBAOvuqjq1HrrV4dQn1ZIwUPlhSKbWlUNARPpyqUiCeqPOrLrUa/REpm1pZ/5KZmEqbwDxGwYsi9wnU4R3yanHj4zGVCHsRwx0fc8bKd1IfAqN1dyJo7LcPWr1JItMbO/fT0fGmS9A15A/RDO6dKyoeYqQz986ZffXbW37qlp/6oPmphpiaxsUTVGbP4rubezqWe7rFWRRStxEtOh1VlFDMd/ireEVs/3JV8Vi5r8naUpifmX/7q+LYSr5s691vvun1+6a6PuCbVOd2TruX7R3aaiVy4v8gARj9RGV/gmj42F8IMDATCCobCcQEd1h8N494z+rxHYRNqxkiIpLwoaD3WM9G7811NCh7Q4wr1EMbuoOMq++4JhjrK0fOgAyP1SYIJ0vxWJ94cVV7SZ+3pI+zUKrI/72/eVykIEXvvGiWL/oQOhvnjw/yhBrs7QQQZZ6LLK6S2sTNYxd86JpoixQj+ZJ9OXGAaP0Ii6s+vrAV67wuJm6ipHWR5R2er1q+a7mW45ma7dVFxnDI9GgEP7apO74l9xkPFcVGrUM/b6rG+rxrr65DoA5ZLHkGgbIuaL6krKQpQ/1omZETpaeWmWbL7i2hf46Efjl+pYHZt726pTWNHkvbulCylFfNqtlAl2UO96zWJDvyYMcLskid97ekOgMl+d1lCWLoeB2F/aya7Fm+r7peeVQsmz9Ypk0y8ZWfUVcVxBT71Z91XoHv7u0aWmdwWr/tIioDU2Xana6T8xWE4awHT8pCDF4slkLyWPxDUxSsGzptbCLtmWLf19UAxUXXsqTOL/Xv1Zrv//jVRFMNy6wjRBi2d6J9Iq0dktMNaLHI4IEYI7vlPzSSn17+Qy7mw/8B</diagram></mxfile>
|
||||
BIN
slides/images/docker-networking-default-bridge-linux.png
Normal file
BIN
slides/images/docker-networking-default-bridge-linux.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 321 KiB |
1
slides/images/docker-networking-networks-linux.drawio
Normal file
1
slides/images/docker-networking-networks-linux.drawio
Normal file
File diff suppressed because one or more lines are too long
BIN
slides/images/docker-networking-networks-linux.png
Normal file
BIN
slides/images/docker-networking-networks-linux.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 540 KiB |
1
slides/images/docker-networking-networks-macwin.drawio
Normal file
1
slides/images/docker-networking-networks-macwin.drawio
Normal file
File diff suppressed because one or more lines are too long
BIN
slides/images/docker-networking-networks-macwin.png
Normal file
BIN
slides/images/docker-networking-networks-macwin.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 445 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user