Compare commits

1 commit

Author: Jerome Petazzoni | SHA1: 8dacf00bf3 | Message: Test gitpod | Date: 2020-04-03 10:56:39 -05:00

259 changed files with 4279 additions and 41258 deletions

.gitpod.yml Normal file
View File

@@ -0,0 +1 @@
image: jpetazzo/shpod

View File

@@ -9,21 +9,21 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080

View File

@@ -1,6 +1,3 @@
# Note: hyperkube isn't available after Kubernetes 1.18.
# So we'll have to update this for Kubernetes 1.19!
version: "3"
services:
@@ -12,20 +9,20 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080

View File

@@ -1,49 +0,0 @@
k8s_yaml(blob('''
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: registry
name: registry
spec:
selector:
matchLabels:
app: registry
template:
metadata:
labels:
app: registry
spec:
containers:
- image: registry
name: registry
---
apiVersion: v1
kind: Service
metadata:
labels:
app: registry
name: registry
spec:
ports:
- port: 5000
protocol: TCP
targetPort: 5000
nodePort: 30555
selector:
app: registry
type: NodePort
'''))
default_registry('localhost:30555')
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
k8s_yaml('../k8s/dockercoins.yaml')
# Uncomment the following line to let tilt run with the default kubeadm cluster-admin context.
#allow_k8s_contexts('kubernetes-admin@kubernetes')
# While we're here: if you're controlling a remote cluster, uncomment the line below as well.
# It will create a port forward so that you can access the remote registry.
#k8s_resource(workload='registry', port_forwards='30555:5000')
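
For reference, once Tilt has pushed a few images, the in-cluster registry defined above can be queried through its NodePort; a minimal check, assuming you are on a cluster node (or using the port-forward mentioned above):

# List the repositories stored in the local registry exposed on NodePort 30555.
curl http://localhost:30555/v2/_catalog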

View File

@@ -1,33 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: certbot
spec:
ports:
- port: 80
protocol: TCP
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: certbot
spec:
rules:
- http:
paths:
- path: /.well-known/acme-challenge/
backend:
serviceName: certbot
servicePort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: certbot
subsets:
- addresses:
- ip: A.B.C.D
ports:
- port: 8000
protocol: TCP
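
The Service above has no selector; the hand-written Endpoints send ACME challenge traffic to port 8000 at A.B.C.D. Presumably, something like certbot running in standalone mode answers the challenge on that machine; a sketch, keeping the address placeholder and using the nip.io domain convention seen elsewhere in this repo:

# To be run on the machine at A.B.C.D (placeholder kept on purpose):
certbot certonly --standalone --http-01-port 8000 -d example.A.B.C.D.nip.io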

View File

@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: xyz.A.B.C.D.nip.io
spec:
secretName: xyz.A.B.C.D.nip.io
dnsNames:
- xyz.A.B.C.D.nip.io
issuerRef:
name: letsencrypt-staging
kind: ClusterIssuer

View File

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# Remember to update this if you use this manifest to obtain real certificates :)
email: hello@example.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
# To use the production environment, use the following line instead:
#server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: issuer-letsencrypt-staging
solvers:
- http01:
ingress:
class: traefik
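
A quick way to check that the issuer above is picked up, assuming cert-manager is already installed (the file name is just an example):

kubectl apply -f letsencrypt-staging.yaml
kubectl get clusterissuer letsencrypt-staging    # wait for READY to become True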

View File

@@ -4,10 +4,6 @@ metadata:
name: coffees.container.training
spec:
group: container.training
versions:
- name: v1alpha1
served: true
storage: true
scope: Namespaced
names:
plural: coffees
@@ -15,4 +11,25 @@ spec:
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
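
Once this CRD is applied, the additionalPrinterColumns defined above show up directly in kubectl output; resource and column names are taken from the manifest:

kubectl get coffees                   # lists coffees with NAME, TASTE, and AGE columns
kubectl explain coffees.spec.taste    # should show the description from the OpenAPI schema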

View File

@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date

View File

@@ -9,9 +9,9 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: excelsa
name: robusta
spec:
taste: fruity
taste: stronger
---
kind: Coffee
apiVersion: container.training/v1alpha1
@@ -23,12 +23,7 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: robusta
name: excelsa
spec:
taste: stronger
bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: java
taste: fruity

View File

@@ -1,77 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.8"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"

View File

@@ -1,98 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.8"
volumeMounts:
- name: data
mountPath: /consul/data
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]

View File

@@ -1,9 +1,5 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - the cluster keeps working when scaling down to 1 (thanks to the lifecycle hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: consul
rules:
@@ -15,16 +11,17 @@ rules:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
@@ -62,22 +59,20 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: consul
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.8"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "consul:1.6"
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
@@ -85,4 +80,7 @@ spec:
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "consul leave" ]
command:
- /bin/sh
- -c
- consul leave

View File

@@ -1,305 +0,0 @@
# This is a copy of the following file:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}

View File

@@ -1,336 +0,0 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds a ServiceAccount that has cluster-admin privileges on the cluster,
# and exposes the dashboard on a NodePort. It makes it easier to do quick demos
# of the Kubernetes dashboard, without compromising security too much.
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cluster-admin
namespace: kubernetes-dashboard
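
To log into the dashboard deployed above, one needs the NodePort of the Service and a token for the cluster-admin ServiceAccount; a sketch (the create token subcommand requires kubectl 1.24 or later):

kubectl -n kubernetes-dashboard get service kubernetes-dashboard   # note the NodePort mapped to 443
kubectl -n kubernetes-dashboard create token cluster-admin         # paste this token in the login screen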

View File

@@ -5,7 +5,7 @@ metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: fluentd
@@ -21,7 +21,7 @@ rules:
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd
roleRef:

View File

@@ -11,7 +11,7 @@ metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: elasticsearch-operator
@@ -41,7 +41,7 @@ rules:
resources: ["elasticsearchclusters"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: elasticsearch-operator
@@ -55,16 +55,13 @@ subjects:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
spec:
replicas: 1
selector:
matchLabels:
name: elasticsearch-operator
template:
metadata:
labels:

View File

@@ -1,30 +0,0 @@
kind: Event
apiVersion: v1
metadata:
generateName: hello-
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
kind: Node
apiVersion: v1
name: kind-control-plane
# Note: the uid should be the Node name (not the uid of the Node).
# This might be specific to global objects.
uid: kind-control-plane
type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""
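
Since the manifest above uses generateName, it must be created with kubectl create rather than kubectl apply; the event then shows up on the node it refers to. The file name below is just an example:

kubectl create -f node-overheat-event.yaml
kubectl get events --field-selector involvedObject.name=kind-control-plane
kubectl describe node kind-control-plane   # the event appears in the Events section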

View File

@@ -1,36 +0,0 @@
kind: Event
apiVersion: v1
metadata:
# One convention is to use <objectname>.<timestamp>,
# where the timestamp is taken with nanosecond
# precision and expressed in hexadecimal.
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
name: hello.1234567890
# The label doesn't serve any purpose, except making
# it easier to identify or delete that specific event.
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
### These 5 lines should be updated to refer to an object.
### Make sure to put the correct "uid", because it is what
### "kubectl describe" is using to gather relevant events.
#apiVersion: v1
#kind: Pod
#name: magic-bean
#namespace: blue
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""
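
A tiny helper to generate a name following the convention described above (nanosecond timestamp in hexadecimal); it requires GNU date, and "hello" stands in for the object name:

printf '%s.%x\n' hello "$(date +%s%N)"   # prints a name like hello.164689730a36ec3d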

View File

@@ -52,7 +52,7 @@ data:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: filebeat
@@ -60,9 +60,6 @@ metadata:
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
@@ -131,7 +128,7 @@ spec:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: filebeat
@@ -144,7 +141,7 @@ roleRef:
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: filebeat

View File

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
namespace: kube-system

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hackthecluster
spec:
selector:
matchLabels:
app: hackthecluster
template:
metadata:
labels:
app: hackthecluster
spec:
volumes:
- name: slash
hostPath:
path: /
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: alpine
image: alpine
volumeMounts:
- name: slash
mountPath: /hostfs
command:
- sleep
- infinity
securityContext:
#privileged: true
capabilities:
add:
- SYS_CHROOT

View File

@@ -27,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
- "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx

View File

@@ -1,29 +0,0 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta2
metadata:
name: rng
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: rng
minReplicas: 1
maxReplicas: 20
behavior:
scaleUp:
stabilizationWindowSeconds: 60
scaleDown:
stabilizationWindowSeconds: 180
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
kind: Service
name: httplat
metric:
name: httplat_latency_seconds
target:
type: Value
value: 0.1
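
Once the autoscaler above exists (and a custom metrics adapter serves httplat_latency_seconds), its behavior can be inspected with:

kubectl get hpa rng        # current value of the metric vs. the 0.1 target
kubectl describe hpa rng   # scaling events, and errors fetching the metric if any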

View File

@@ -1,20 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whatever
port:
number: 1234

View File

@@ -1,17 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

View File

@@ -1 +0,0 @@
ingress-v1beta1.yaml

k8s/ingress.yaml Normal file
View File

@@ -0,0 +1,13 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

View File

@@ -1,10 +1,3 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds the "skip login" flag, as well as an insecure hack to defeat SSL.
# As its name implies, it is INSECURE and you should not use it in production,
# or on clusters that contain any kind of important or sensitive data, or on
# clusters that have a life span of more than a few hours.
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -194,7 +187,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0
image: kubernetesui/dashboard:v2.0.0-rc2
imagePullPolicy: Always
ports:
- containerPort: 8443
@@ -233,7 +226,7 @@ spec:
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
@@ -279,7 +272,7 @@ spec:
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
image: kubernetesui/metrics-scraper:v1.0.2
ports:
- containerPort: 8000
protocol: TCP
@@ -300,7 +293,7 @@ spec:
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master

View File

@@ -0,0 +1,162 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard

View File

@@ -1,63 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: setup-namespace
spec:
rules:
- name: setup-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- type: Container
min:
cpu: 0.1
memory: 0.1
max:
cpu: 2
memory: 2Gi
default:
cpu: 0.25
memory: 500Mi
defaultRequest:
cpu: 0.25
memory: 250Mi
- name: setup-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
- name: setup-networkpolicy
match:
resources:
kinds:
- Namespace
generate:
kind: NetworkPolicy
name: default-networkpolicy
namespace: "{{request.object.metadata.name}}"
data:
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
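
With the generate policy above installed, every new Namespace should automatically receive the three generated objects; a quick check (the namespace name is arbitrary):

kubectl create namespace kyverno-test
kubectl get limitrange,resourcequota,networkpolicy -n kyverno-test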

View File

@@ -1,22 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: Exists
- key: color
operator: NotIn
values: [ red, green, blue ]
validate:
message: "If it exists, the label color must be red, green, or blue."
deny: {}

View File

@@ -1,21 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
validate:
message: "Once label color has been added, it cannot be changed."
deny:
conditions:
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEqual
value: "{{ request.object.metadata.labels.color }}"

View File

@@ -1,25 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-removal
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: DoesNotExist
validate:
message: "Once label color has been added, it cannot be removed."
deny:
conditions:
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotIn
value: []
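
A way to exercise the first policy above (pod name and label values are arbitrary); policies 2 and 3 are then meant to block changing or removing the color label once it has been set:

kubectl run testpod --image=nginx
kubectl label pod testpod color=purple   # should be rejected: not red, green, or blue
kubectl label pod testpod color=blue     # should be accepted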

View File

@@ -1,50 +1,49 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
namespace: local-path-storage
rules:
- apiGroups: [ "" ]
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "endpoints", "persistentvolumes", "pods" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
verbs: ["*"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
namespace: local-path-storage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
@@ -63,28 +62,27 @@ spec:
spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.19
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.8
imagePullPolicy: Always
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
@@ -93,7 +91,6 @@ metadata:
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
@@ -102,59 +99,12 @@ metadata:
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
containers:
- name: helper-pod
image: busybox
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}

View File

@@ -1,61 +1,32 @@
# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# But with the following arguments added to metrics-server:
# args:
# - --kubelet-insecure-tls
# - --metric-resolution=5s
apiVersion: v1
kind: ServiceAccount
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:aggregated-metrics-reader
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
@@ -67,127 +38,101 @@ subjects:
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: Service
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --kubelet-insecure-tls
- --metric-resolution=5s
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir
mountPath: /tmp
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
- --metric-resolution=5s
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
kubernetes.io/name: "Metrics-server"
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
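
Whichever side of the diff above is deployed, the metrics pipeline can be verified with:

kubectl get apiservice v1beta1.metrics.k8s.io   # should eventually report Available=True
kubectl top nodes
kubectl top pods --all-namespaces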

View File

@@ -14,7 +14,7 @@ spec:
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: openebs-local-hostpath-pod
spec:
volumes:
- name: storage
persistentVolumeClaim:
claimName: local-hostpath-pvc
containers:
- name: better
image: alpine
command:
- sh
- -c
- |
while true; do
echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
sleep $(($RANDOM % 5 + 20))
done
volumeMounts:
- mountPath: /mnt/storage
name: storage
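
The pod above appends a line to the volume roughly every 20-25 seconds; to confirm that the claim is bound and data is being written:

kubectl get pvc local-hostpath-pvc
kubectl exec openebs-local-hostpath-pod -- tail /mnt/storage/greet.txt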

File diff suppressed because it is too large.

View File

@@ -22,10 +22,7 @@ spec:
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
containers:
- name: postgres
image: postgres:12
env:
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
image: postgres:11
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres

View File

@@ -1,12 +1,12 @@
---
apiVersion: policy/v1beta1
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
name: restricted
spec:
allowPrivilegeEscalation: false

View File

@@ -1,17 +1,28 @@
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
creationTimestamp: null
generation: 1
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
replicas: 1
selector:
matchLabels:
app: socat
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: socat
spec:
@@ -23,19 +34,34 @@ spec:
image: alpine
imagePullPolicy: Always
name: socat
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
externalTrafficPolicy: Cluster
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: socat
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}

View File

@@ -1,17 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234

View File

@@ -1,87 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1,102 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1 +0,0 @@
traefik-v2.yaml

k8s/traefik.yaml Normal file
View File

@@ -0,0 +1,103 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -8,24 +8,24 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: user=jean.doe
name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ user=jean.doe ]
resourceNames: [ users:jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: user=jean.doe
name: users:jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: user=jean.doe
name: users:jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe
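
The resourceNames restriction above limits the account to its own CSR; this can be spot-checked with kubectl auth can-i (the "default" namespace for the jean.doe ServiceAccount is an assumption):

kubectl auth can-i create certificatesigningrequests --as=system:serviceaccount:default:jean.doe              # yes
kubectl auth can-i get certificatesigningrequests/users:jean.doe --as=system:serviceaccount:default:jean.doe  # yes
kubectl auth can-i get certificatesigningrequests/someone-else --as=system:serviceaccount:default:jean.doe    # no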

View File

@@ -3,6 +3,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node2
annotations:
node: node2
spec:
capacity:
storage: 10Gi
@@ -24,6 +26,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node3
annotations:
node: node3
spec:
capacity:
storage: 10Gi
@@ -45,6 +49,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node4
annotations:
node: node4
spec:
capacity:
storage: 10Gi

View File

@@ -1,13 +0,0 @@
#!/bin/sh
# Create an EKS cluster.
# This is not idempotent (each time you run it, it creates a new cluster).
eksctl create cluster \
--node-type=t3.large \
--nodes-max=10 \
--alb-ingress-access \
--asg-access \
--ssh-access \
--with-oidc \
#
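
Once eksctl is done (this takes a while), a minimal check that the cluster is usable (the cluster name is whatever eksctl generated; the first command lists it):

eksctl get cluster
aws eks update-kubeconfig --name NAME_FROM_PREVIOUS_COMMAND
kubectl get nodes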

View File

@@ -1,32 +0,0 @@
#!/bin/sh
# For each user listed in "users.txt", create an IAM user.
# Also create AWS API access keys, and store them in "users.keys".
# This is idempotent (you can run it multiple times, it will only
# create the missing users). However, it will not remove users.
# Note that you can remove users from "users.keys" (or even wipe
# that file out entirely) and then this script will delete their
# keys and generate new keys for them (and add the new keys to
# "users.keys".)
echo "Getting list of existing users ..."
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp
for U in $(cat users.txt); do
if ! grep -qw $U users.tmp; then
echo "Creating user $U..."
aws iam create-user --user-name=$U \
--tags=Key=container.training,Value=1
fi
if ! grep -qw $U users.keys; then
echo "Listing keys for user $U..."
KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId)
for KEY in $KEYS; do
echo "Deleting key $KEY for user $U..."
aws iam delete-access-key --user=$U --access-key-id=$KEY
done
echo "Creating access key for user $U..."
aws iam create-access-key --user=$U --output json \
| jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
>> users.keys
fi
done
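
users.keys ends up with one tab-separated line per user: user name, access key ID, secret access key (that's the @tsv output above). A quick way to spot users that still lack keys (have.keys and want.users are just scratch files for this check):

cut -f1 users.keys | sort > have.keys
sort users.txt > want.users
comm -23 want.users have.keys    # users listed in users.txt with no keys yet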

View File

@@ -1,51 +0,0 @@
#!/bin/sh
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
# This is idempotent, which allows you to update the policy document below if
# you want the users to do other things as well.
# Note that each time you run this script, it will actually create a new
# version of the policy, set that version as the default version, and
# remove all non-default versions. (You can only have up to
# 5 versions of a given policy, so you need to clean them up.)
# After running that script, you will want to attach the policy to our
# users (check the other scripts in that directory).
POLICY_NAME=user.container.training
POLICY_DOC='{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"eks:DescribeCluster"
],
"Resource": "arn:aws:eks:*",
"Effect": "Allow"
}
]
}'
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
aws iam create-policy-version \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
--policy-document "$POLICY_DOC" \
--set-as-default
# For reference, the command below creates a policy without versioning:
#aws iam create-policy \
#--policy-name user.container.training \
#--policy-document "$JSON"
for VERSION in $(
aws iam list-policy-versions \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
--query 'Versions[?!IsDefaultVersion].VersionId' \
--output text)
do
aws iam delete-policy-version \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
--version-id "$VERSION"
done
# For reference, the command below shows all users using the policy:
#aws iam list-entities-for-policy \
#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
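
To confirm that the new version is now the default and that the old ones were pruned:

ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
aws iam list-policy-versions \
  --policy-arn arn:aws:iam::$ACCOUNT:policy/user.container.training \
  --query 'Versions[*].[VersionId,IsDefaultVersion]' --output table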

View File

@@ -1,14 +0,0 @@
#!/bin/sh
# Attach our user policy to all the users defined in "users.txt".
# This should be idempotent, because attaching the same policy
# to the same user multiple times doesn't do anything.
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
POLICY_NAME=user.container.training
for U in $(cat users.txt); do
echo "Attaching policy to user $U ..."
aws iam attach-user-policy \
--user-name $U \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
done
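
Spot-checking a single user (any name from users.txt) confirms the attachment:

aws iam list-attached-user-policies --user-name ada.lovelace --output table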

View File

@@ -1,24 +0,0 @@
#!/bin/sh
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
# Each user defined in "users.txt" will be mapped to a Kubernetes user
# with the same name, and put in the "container.training" group, too.
# This is idempotent.
# WARNING: this will wipe out the mapUsers component of the aws-auth
# ConfigMap, removing all users that aren't in "users.txt".
# It won't touch mapRoles, so it shouldn't break the role mappings
# put in place by EKS.
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
rm -f users.map
for U in $(cat users.txt); do
echo "\
- userarn: arn:aws:iam::$ACCOUNT:user/$U
username: $U
groups: [ container.training ]\
" >> users.map
done
kubectl create --namespace=kube-system configmap aws-auth \
--dry-run=client --from-file=mapUsers=users.map -o yaml \
| kubectl apply -f-
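
To inspect the resulting mapping (and confirm that mapRoles was left untouched):

kubectl --namespace kube-system get configmap aws-auth -o yaml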

View File

@@ -1,65 +0,0 @@
#!/bin/sh
# Create a shared Kubernetes Namespace ("container-training") as well as
# individual namespaces for every user in "users.txt", and set up a bunch
# of permissions.
# Specifically:
# - each user gets "view" permissions in the "default" Namespace
# - each user gets "edit" permissions in the "container-training" Namespace
# - each user gets permissions to list Nodes and Namespaces
# - each user gets "admin" permissions in their personal Namespace
# Note that since Kubernetes Namespaces can't have dots in their names,
# if a user has dots, dots will be mapped to dashes.
# So user "ada.lovelace" will get namespace "ada-lovelace".
# This is kind of idempotent (but will raise a bunch of errors for objects
# that already exist).
# TODO: if this needs to evolve, replace all the "create" operations by
# "apply" operations. But this is good enough for now.
kubectl create rolebinding --namespace default container.training \
--group=container.training --clusterrole=view
kubectl create clusterrole view-nodes \
--verb=get,list,watch --resource=node
kubectl create clusterrolebinding view-nodes \
--group=container.training --clusterrole=view-nodes
kubectl create clusterrole view-namespaces \
--verb=get,list,watch --resource=namespace
kubectl create clusterrolebinding view-namespaces \
--group=container.training --clusterrole=view-namespaces
kubectl create namespace container-training
kubectl create rolebinding --namespace container-training edit \
--group=container.training --clusterrole=edit
# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
# instead of running "kubectl" N times, we generate a bunch of YAML and
# apply it. It will still generate a lot of API calls but it's much faster
# than calling "kubectl" N times. It might be possible to make this even
# faster by generating a "kind: List" (I don't know if this would issue
# a single API call or multiple ones; TBD!)
for U in $(cat users.txt); do
NS=$(echo $U | tr . -)
cat <<EOF
---
kind: Namespace
apiVersion: v1
metadata:
name: $NS
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: admin
namespace: $NS
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: $U
EOF
done | kubectl create -f-
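
A few `kubectl auth can-i` checks give a quick sanity check of the permissions above. Group-based permissions come from the aws-auth mapping, so both the user and the group must be impersonated; the per-user namespace binding targets the user directly. (ada.lovelace is just one of the users from users.txt.)

kubectl auth can-i list pods --namespace default \
        --as=ada.lovelace --as-group=container.training    # yes ("view")
kubectl auth can-i get nodes \
        --as=ada.lovelace --as-group=container.training    # yes ("view-nodes")
kubectl auth can-i create deployments --namespace ada-lovelace \
        --as=ada.lovelace                                   # yes ("admin")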

View File

@@ -1,76 +0,0 @@
#!/bin/sh
# Create an IAM role to be used by a Kubernetes ServiceAccount.
# The role isn't given any permissions yet (this has to be done by
# another script in this series), but a properly configured Pod
# should still be able to execute "aws sts get-caller-identity"
# and confirm that it's using that role.
# This requires the cluster to have an attached OIDC provider.
# This should be the case if the cluster has been created with
# the scripts in this directory; otherwise, this can be done with
# the subsequent command, which is idempotent:
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
# The policy document used below will authorize all ServiceAccounts
# in the "container-training" Namespace to use that role.
# This script will also annotate the container-training:default
# ServiceAccount so that it can use that role.
# This script is not quite idempotent: if you want to use a new
# trust policy, some work will be required. (You can delete the role,
# but that requires detaching the associated policies. There might also
# be a way to update the trust policy directly; we didn't investigate this
# further at this point.)
if [ "$1" ]; then
CLUSTER="$1"
else
echo "Please indicate cluster to use. Available clusters:"
aws eks list-clusters --output table
exit 1
fi
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
ROLE_NAME=s3-reader-container-training
TRUST_POLICY=$(envsubst <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringLike": {
"${OIDC}:sub": ["system:serviceaccount:container-training:*"]
}
}
}
]
}
EOF
)
aws iam create-role \
--role-name "$ROLE_NAME" \
--assume-role-policy-document "$TRUST_POLICY"
kubectl annotate serviceaccounts \
--namespace container-training default \
"eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
--overwrite
exit
# Here are commands to delete the role:
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
aws iam delete-role --role-name $ROLE_NAME
# Merging the policy with the existing policies:
{
aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
echo "$TRUST_POLICY" | jq -r .Statement[]
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
aws iam update-assume-role-policy \
--role-name $ROLE_NAME \
--policy-document file:///tmp/policy.json

View File

@@ -1,54 +0,0 @@
#!/bin/sh
# Create an S3 bucket with two objects in it:
# - public.txt (world-readable)
# - private.txt (private)
# Also create an IAM policy granting read-only access to the bucket
# (and therefore, to the private object).
# Finally, attach the policy to an IAM role (for instance, the role
# created by another script in this directory).
# This isn't idempotent, but it can be made idempotent by replacing the
# "aws iam create-policy" call with "aws iam create-policy-version" and
# a bit of extra elbow grease. (See other scripts in this directory for
# an example).
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
BUCKET=container.training
ROLE_NAME=s3-reader-container-training
POLICY_NAME=s3-reader-container-training
POLICY_DOC=$(envsubst <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetObject*"
],
"Resource": [
"arn:aws:s3:::$BUCKET",
"arn:aws:s3:::$BUCKET/*"
]
}
]
}
EOF
)
aws iam create-policy \
--policy-name $POLICY_NAME \
--policy-doc "$POLICY_DOC"
aws s3 mb s3://container.training
echo "this is a public object" \
| aws s3 cp - s3://container.training/public.txt \
--acl public-read
echo "this is a private object" \
| aws s3 cp - s3://container.training/private.txt \
--acl private
aws iam attach-role-policy \
--role-name "$ROLE_NAME" \
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
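
After the script runs, the two objects should behave differently: the public one is readable anonymously, the private one only with credentials. (Path-style URLs are used here because the bucket name contains dots; use the regional endpoint if the bucket isn't in us-east-1.)

curl https://s3.amazonaws.com/container.training/public.txt    # readable anonymously
curl https://s3.amazonaws.com/container.training/private.txt   # AccessDenied
aws s3 cp s3://container.training/private.txt -                # works with credentials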

View File

@@ -1,50 +0,0 @@
ada.lovelace
adele.goldstine
amanda.jones
anita.borg
ann.kiessling
barbara.mcclintock
beatrice.worsley
bessie.blount
betty.holberton
beulah.henry
carleen.hutchins
caroline.herschel
dona.bailey
dorothy.hodgkin
ellen.ochoa
edith.clarke
elisha.collier
elizabeth.feinler
emily.davenport
erna.hoover
frances.spence
gertrude.blanch
grace.hopper
grete.hermann
giuliana.tesoro
harriet.tubman
hedy.lamarr
irma.wyman
jane.goodall
jean.bartik
joy.mangano
josephine.cochrane
katherine.blodgett
kathleen.antonelli
lynn.conway
margaret.hamilton
maria.beasley
marie.curie
marjorie.joyner
marlyn.meltzer
mary.kies
melitta.bentz
milly.koss
radia.perlman
rosalind.franklin
ruth.teitelbaum
sarah.mather
sophie.wilson
stephanie.kwolek
yvonne.brill

View File

@@ -4,11 +4,7 @@ These tools can help you to create VMs on:
- Azure
- EC2
- Hetzner
- Linode
- OpenStack
- OVHcloud
- Scaleway
## Prerequisites
@@ -17,8 +13,7 @@ These tools can help you to create VMs on:
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
Depending on the infrastructure that you want to use, you also need to install
the CLI that is specific to that cloud. For OpenStack deployments, you will
need Terraform.
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
And if you want to generate printable cards:
@@ -95,9 +90,6 @@ You're all set!
## `./workshopctl` Usage
If you run `./workshopctl` without arguments, it will show a list of
available commands, looking like this:
```
workshopctl - the orchestration workshop swiss army knife
Commands:
@@ -106,7 +98,32 @@ cards Generate ready-to-print cards for a group of VMs
deploy Install Docker on a bunch of running VMs
disableaddrchecks Disable source/destination IP address checks
disabledocker Stop Docker Engine and don't restart it automatically
...
helmprom Install Helm and Prometheus
help Show available commands
ids (FIXME) List the instance IDs belonging to a given tag or token
kubebins Install Kubernetes and CNI binaries but don't start anything
kubereset Wipe out Kubernetes configuration on all nodes
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest Check that all nodes are reporting as Ready
listall List VMs running on all configured infrastructures
list List available groups for a given infrastructure
netfix Disable GRO and run a pinger job on the VMs
opensg Open the default security group to ALL ingress traffic
ping Ping VMs in a given tag, to check that they have network access
pssh Run an arbitrary command on all nodes
pull_images Pre-pull a bunch of Docker images
quotas Check our infrastructure quotas (max instances)
remap_nodeports Remap NodePort range to 10000-10999
retag (FIXME) Apply a new tag to a group of VMs
ssh Open an SSH session to the first node of a tag
start Start a group of VMs
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
tags List groups of VMs known locally
test Run tests (pre-flight checks) on a group of VMs
weavetest Check that weave seems properly setup
webssh Install a WEB SSH server on the machines (port 1080)
wrap Run this program in a container
www Run a web server to access card HTML and PDF
```
### Summary of What `./workshopctl` Does For You
@@ -121,8 +138,7 @@ disabledocker Stop Docker Engine and don't restart it automatically
### Example Steps to Launch a group of AWS Instances for a Workshop
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --students 50` to create 50 clusters
- The number of instances will be `students × clustersize`
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --count 60` to create 60 EC2 instances
- Your local SSH key will be synced to instances under `ubuntu` user
- AWS instances will be created and tagged based on date, and IP's stored in `prepare-vms/tags/`
- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
@@ -232,19 +248,12 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
#### List tags
$ ./workshopctl list infra/some-infra-file
$ ./workshopctl listall
$ ./workshopctl tags
$ ./workshopctl inventory infra/some-infra-file
$ ./workshopctl inventory
Note: the `tags` command will show only the VMs that you have provisioned
and deployed on the current machine (i.e. listed in the `tags` subdirectory).
The `inventory` command will try to list all existing VMs (including the
ones not listed in the `tags` directory, and including VMs provisioned
through other mechanisms). It is not supported across all platforms,
however.
#### Stop and destroy VMs
$ ./workshopctl stop TAG

View File

@@ -1,5 +1,4 @@
INFRACLASS=openstack-tf
INFRACLASS=openstack
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
@@ -7,4 +6,4 @@ export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"
export TF_VAR_flavor="GP1.S"

View File

@@ -1,24 +0,0 @@
INFRACLASS=openstack-cli
# Copy that file to e.g. openstack or ovh, then customize it.
# Some Openstack providers (like OVHcloud) will let you download
# a file containing credentials. That's what you need to use.
# The file below contains some example values.
export OS_AUTH_URL=https://auth.cloud.ovh.net/v3/
export OS_IDENTITY_API_VERSION=3
export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-"Default"}
export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-"Default"}
export OS_TENANT_ID=abcd1234
export OS_TENANT_NAME="0123456"
export OS_USERNAME="user-xyz123"
export OS_PASSWORD=AbCd1234
export OS_REGION_NAME="GRA7"
# And then some values to indicate server type, image, etc.
# You can see available flavors with `openstack flavor list`
export OS_FLAVOR=s1-4
# You can see available images with `openstack image list`
export OS_IMAGE=896c5f54-51dc-44f0-8c22-ce99ba7164df
# You can create a key with `openstack keypair create --public-key ~/.ssh/id_rsa.pub containertraining`
export OS_KEY=containertraining
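
Before launching a whole batch, the copy of this file can be sourced manually and the values poked at with the OpenStack CLI (the file name below is whatever you copied this example to):

. infra/my-openstack-copy
openstack flavor list
openstack image show $OS_IMAGE
openstack keypair show $OS_KEY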

View File

@@ -1,5 +0,0 @@
INFRACLASS=hetzner
if ! [ -f ~/.config/hcloud/cli.toml ]; then
warning "~/.config/hcloud/cli.toml not found."
warning "Make sure that the Hetzner CLI (hcloud) is installed and configured."
fi

View File

@@ -1,3 +0,0 @@
INFRACLASS=scaleway
#SCW_INSTANCE_TYPE=DEV1-L
#SCW_ZONE=fr-par-2

View File

@@ -66,7 +66,7 @@ need_infra() {
need_tag() {
if [ -z "$TAG" ]; then
die "Please specify a tag. To see available tags, run: $0 tags"
die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
fi
if [ ! -d "tags/$TAG" ]; then
die "Tag $TAG not found (directory tags/$TAG does not exist)."

View File

@@ -1,9 +1,5 @@
export AWS_DEFAULT_OUTPUT=text
# Ignore SSH key validation when connecting to these remote hosts.
# (Otherwise, deployment scripts break when a VM IP address is reused.)
SSHOPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR"
HELP=""
_cmd() {
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
@@ -47,16 +43,6 @@ _cmd_cards() {
info "$0 www"
}
_cmd clean "Remove information about stopped clusters"
_cmd_clean() {
for TAG in tags/*; do
if grep -q ^stopped$ "$TAG/status"; then
info "Removing $TAG..."
rm -rf "$TAG"
fi
done
}
_cmd deploy "Install Docker on a bunch of running VMs"
_cmd_deploy() {
TAG=$1
@@ -73,35 +59,11 @@ _cmd_deploy() {
echo deploying > tags/$TAG/status
sep "Deploying tag $TAG"
# If this VM image is using cloud-init,
# wait for cloud-init to be done
# Wait for cloudinit to be done
pssh "
if [ -d /var/lib/cloud ]; then
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
fi"
# Special case for scaleway since it doesn't come with sudo
if [ "$INFRACLASS" = "scaleway" ]; then
pssh -l root "
grep DEBIAN_FRONTEND /etc/environment || echo DEBIAN_FRONTEND=noninteractive >> /etc/environment
grep cloud-init /etc/sudoers && rm /etc/sudoers
apt-get update && apt-get install sudo -y"
fi
# FIXME
# Special case for hetzner since it doesn't have an ubuntu user
#if [ "$INFRACLASS" = "hetzner" ]; then
# pssh -l root "
#[ -d /home/ubuntu ] ||
# useradd ubuntu -m -s /bin/bash
#echo 'ubuntu ALL=(ALL:ALL) NOPASSWD:ALL' > /etc/sudoers.d/ubuntu
#[ -d /home/ubuntu/.ssh ] ||
# install --owner=ubuntu --mode=700 --directory /home/ubuntu/.ssh
#[ -f /home/ubuntu/.ssh/authorized_keys ] ||
# install --owner=ubuntu --mode=600 /root/.ssh/authorized_keys --target-directory /home/ubuntu/.ssh"
#fi
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done"
# Copy settings and install Python YAML parser
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
@@ -109,12 +71,6 @@ _cmd_deploy() {
sudo apt-get update &&
sudo apt-get install -y python-yaml"
# If there is no "python" binary, symlink to python3
#pssh "
#if ! which python; then
# ln -s $(which python3) /usr/local/bin/python
#fi"
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
pssh -I tee /tmp/postprep.py <lib/postprep.py
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
@@ -126,7 +82,7 @@ _cmd_deploy() {
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
pssh "
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
ssh $SSHOPTS \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
sudo -u docker tar -C /home/docker -xf-"
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
@@ -170,27 +126,24 @@ _cmd_kubebins() {
TAG=$1
need_tag
##VERSION##
ETCD_VERSION=v3.4.13
K8SBIN_VERSION=v1.19.11 # Can't go to 1.20 because it requires a serviceaccount signing key.
CNI_VERSION=v0.8.7
pssh --timeout 300 "
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-amd64.tar.gz \
##VERSION##
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
##VERSION##
curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-amd64.tar.gz \
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx \
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
fi
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-amd64-$CNI_VERSION.tgz \
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
| sudo tar -zx
fi
"
@@ -220,22 +173,13 @@ _cmd_kube() {
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
# Disable swap
# (note that this won't survive across node reboots!)
if [ "$INFRACLASS" = "linode" ]; then
pssh "
sudo swapoff -a"
fi
# Initialize kube control plane
# Initialize kube master
pssh --timeout 200 "
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4) --ignore-preflight-errors=NumCPU
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
fi"
# Put kubeconfig in ubuntu's and docker's accounts
@@ -259,7 +203,7 @@ _cmd_kube() {
pssh --timeout 200 "
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
TOKEN=\$(ssh $SSHOPTS \$FIRSTNODE cat /tmp/token) &&
TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
fi"
@@ -268,23 +212,17 @@ _cmd_kube() {
if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
fi"
}
_cmd kubetools "Install a bunch of CLI tools for Kubernetes"
_cmd_kubetools() {
TAG=$1
need_tag
# Install kubectx and kubens
pssh "
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
sudo ln -sf \$HOME/kubectx/kubectx /usr/local/bin/kctx &&
sudo ln -sf \$HOME/kubectx/kubens /usr/local/bin/kns &&
sudo cp \$HOME/kubectx/completion/*.bash /etc/bash_completion.d &&
sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
sudo -u docker tee -a /home/docker/.bashrc <<EOF
. \$HOME/kube-ps1/kube-ps1.sh
. /home/ubuntu/kube-ps1/kube-ps1.sh
KUBE_PS1_PREFIX=""
KUBE_PS1_SUFFIX=""
KUBE_PS1_SYMBOL_ENABLE="false"
@@ -308,85 +246,23 @@ EOF"
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
# Install kustomize
pssh "
if [ ! -x /usr/local/bin/kustomize ]; then
##VERSION##
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.6.1/kustomize_v3.6.1_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx kustomize
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
fi"
# Install ship
# Note: 0.51.3 is the last version that doesn't display GIN-debug messages
# (don't want to get folks confused by that!)
pssh "
if [ ! -x /usr/local/bin/ship ]; then
##VERSION##
curl -L https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_amd64.tar.gz |
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"
# Install the AWS IAM authenticator
pssh "
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
##VERSION##
##VERSION##
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
sudo chmod +x /usr/local/bin/aws-iam-authenticator
fi"
# Install the krew package manager
pssh "
if [ ! -d /home/docker/.krew ]; then
cd /tmp &&
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz |
tar -zxf- &&
sudo -u docker -H ./krew-linux_amd64 install krew &&
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
fi"
# Install k9s
pssh "
if [ ! -x /usr/local/bin/k9s ]; then
VERSION=v0.24.10 &&
FILENAME=k9s_\${VERSION}_\$(uname -s)_\$(uname -m).tar.gz &&
curl -sSL https://github.com/derailed/k9s/releases/download/\$VERSION/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin k9s
fi"
# Install popeye
pssh "
if [ ! -x /usr/local/bin/popeye ]; then
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin popeye
fi"
# Install Tilt
pssh "
if [ ! -x /usr/local/bin/tilt ]; then
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
fi"
# Install Skaffold
pssh "
if [ ! -x /usr/local/bin/skaffold ]; then
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 &&
sudo install skaffold /usr/local/bin/
fi"
# Install Kompose
pssh "
if [ ! -x /usr/local/bin/kompose ]; then
curl -Lo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-amd64 &&
sudo install kompose /usr/local/bin
fi"
pssh "
if [ ! -x /usr/local/bin/kubeseal ]; then
curl -Lo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.13.1/kubeseal-linux-amd64 &&
sudo install kubeseal /usr/local/bin
fi"
sep "Done"
}
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
@@ -408,44 +284,44 @@ _cmd_kubetest() {
set -e
if i_am_first_node; then
which kubectl
for NODE in \$(grep [0-9]\$ /etc/hosts | grep -v ^127 | awk {print\ \\\$2}); do
for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
done
fi"
}
_cmd ips "Show the IP addresses for a given tag"
_cmd_ips() {
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
_cmd_ids() {
TAG=$1
need_tag $TAG
SETTINGS=tags/$TAG/settings.yaml
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
while true; do
for I in $(seq $CLUSTERSIZE); do
read ip || return 0
printf "%s\t" "$ip"
done
printf "\n"
done < tags/$TAG/ips.txt
info "Looking up by tag:"
aws_get_instance_ids_by_tag $TAG
# Just in case we managed to create instances but weren't able to tag them
info "Looking up by token:"
aws_get_instance_ids_by_client_token $TAG
}
_cmd inventory "List all VMs on a given infrastructure (or all infras if no arg given)"
_cmd_inventory() {
case "$1" in
"")
for INFRA in infra/*; do
$0 inventory $INFRA
done
;;
*/example.*)
;;
*)
need_infra $1
sep "Listing instances for $1"
infra_list
;;
esac
_cmd list "List available groups for a given infrastructure"
_cmd_list() {
need_infra $1
infra_list
}
_cmd listall "List VMs running on all configured infrastructures"
_cmd_listall() {
for infra in infra/*; do
case $infra in
infra/example.*)
;;
*)
info "Listing infrastructure $infra:"
need_infra $infra
infra_list
;;
esac
done
}
_cmd maketag "Generate a quasi-unique tag for a group of instances"
@@ -453,7 +329,7 @@ _cmd_maketag() {
if [ -z $USER ]; then
export USER=anonymous
fi
MS=$(($(date +%N | tr -d 0)/1000000))
MS=$(($(date +%N)/1000000))
date +%Y-%m-%d-%H-%M-$MS-$USER
}
@@ -524,6 +400,16 @@ _cmd_opensg() {
infra_opensg
}
_cmd portworx "Prepare the nodes for Portworx deployment"
_cmd_portworx() {
TAG=$1
need_tag
pssh "
sudo truncate --size 10G /portworx.blk &&
sudo losetup /dev/loop4 /portworx.blk"
}
_cmd disableaddrchecks "Disable source/destination IP address checks"
_cmd_disableaddrchecks() {
TAG=$1
@@ -560,17 +446,6 @@ _cmd_remap_nodeports() {
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
fi"
info "If you have manifests hard-coding nodePort values,"
info "you might want to patch them with a command like:"
info "
if i_am_first_node; then
kubectl -n kube-system patch svc prometheus-server \\
-p 'spec: { ports: [ {port: 80, nodePort: 10101} ]}'
fi
"
}
_cmd quotas "Check our infrastructure quotas (max instances)"
@@ -579,14 +454,25 @@ _cmd_quotas() {
infra_quotas
}
_cmd retag "(FIXME) Apply a new tag to a group of VMs"
_cmd_retag() {
OLDTAG=$1
NEWTAG=$2
TAG=$OLDTAG
need_tag
if [[ -z "$NEWTAG" ]]; then
die "You must specify a new tag to apply."
fi
aws_tag_instances $OLDTAG $NEWTAG
}
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
need_tag
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP"
ssh $SSHOPTS docker@$IP
ssh docker@$IP
}
_cmd start "Start a group of VMs"
@@ -595,9 +481,8 @@ _cmd_start() {
case "$1" in
--infra) INFRA=$2; shift 2;;
--settings) SETTINGS=$2; shift 2;;
--count) die "Flag --count is deprecated; please use --students instead." ;;
--count) COUNT=$2; shift 2;;
--tag) TAG=$2; shift 2;;
--students) STUDENTS=$2; shift 2;;
*) die "Unrecognized parameter: $1."
esac
done
@@ -609,14 +494,8 @@ _cmd_start() {
die "Please add --settings flag to specify which settings file to use."
fi
if [ -z "$COUNT" ]; then
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
if [ -z "$STUDENTS" ]; then
warning "Neither --count nor --students was specified."
warning "According to the settings file, the cluster size is $CLUSTERSIZE."
warning "Deploying one cluster of $CLUSTERSIZE nodes."
STUDENTS=1
fi
COUNT=$(($STUDENTS*$CLUSTERSIZE))
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
warning "No --count option was specified. Using value from settings file ($COUNT)."
fi
# Check that the specified settings and infrastructure are valid.
@@ -634,43 +513,11 @@ _cmd_start() {
infra_start $COUNT
sep
info "Successfully created $COUNT instances with tag $TAG"
sep
echo created > tags/$TAG/status
# If the settings.yaml file has a "steps" field,
# automatically execute all the actions listed in that field.
# If an action fails, retry it up to 10 times.
python -c 'if True: # hack to deal with indentation
import sys, yaml
settings = yaml.safe_load(sys.stdin)
print ("\n".join(settings.get("steps", [])))
' < tags/$TAG/settings.yaml \
| while read step; do
if [ -z "$step" ]; then
break
fi
sep
info "Automatically executing step '$step'."
TRY=1
MAXTRY=10
while ! $0 $step $TAG ; do
TRY=$(($TRY+1))
if [ $TRY -gt $MAXTRY ]; then
error "This step ($step) failed after $MAXTRY attempts."
info "You can troubleshoot the situation manually, or terminate these instances with:"
info "$0 stop $TAG"
die "Giving up."
else
sep
info "Step '$step' failed. Let's wait 10 seconds and try again."
info "(Attempt $TRY out of $MAXTRY.)"
sleep 10
fi
done
done
sep
info "Deployment successful."
info "To log into the first machine of that batch, you can run:"
info "$0 ssh $TAG"
info "To deploy Docker on these instances, you can run:"
info "$0 deploy $TAG"
info "To terminate these instances, you can run:"
info "$0 stop $TAG"
}
@@ -725,7 +572,7 @@ _cmd_tmux() {
IP=$(head -1 tags/$TAG/ips.txt)
info "Opening ssh+tmux with $IP"
rm -f /tmp/tmux-$UID/default
ssh $SSHOPTS -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
ssh -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
}
_cmd helmprom "Install Helm and Prometheus"
@@ -734,8 +581,8 @@ _cmd_helmprom() {
need_tag
pssh "
if i_am_first_node; then
sudo -u docker -H helm repo add prometheus-community https://prometheus-community.github.io/helm-charts/
sudo -u docker -H helm install prometheus prometheus-community/prometheus \
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
sudo -u docker -H helm install prometheus stable/prometheus \
--namespace kube-system \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
@@ -744,31 +591,6 @@ _cmd_helmprom() {
fi"
}
_cmd passwords "Set individual passwords for each cluster"
_cmd_passwords() {
TAG=$1
need_tag
PASSWORDS_FILE="tags/$TAG/passwords"
if ! [ -f "$PASSWORDS_FILE" ]; then
error "File $PASSWORDS_FILE not found. Please create it first."
error "It should contain one password per line."
error "It should have as many lines as there are clusters."
die "Aborting."
fi
N_CLUSTERS=$($0 ips "$TAG" | wc -l)
N_PASSWORDS=$(wc -l < "$PASSWORDS_FILE")
if [ "$N_CLUSTERS" != "$N_PASSWORDS" ]; then
die "Found $N_CLUSTERS clusters and $N_PASSWORDS passwords. Aborting."
fi
$0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
info "Setting password for $nodes..."
for node in $nodes; do
echo docker:$password | ssh $SSHOPTS ubuntu@$node sudo chpasswd
done
done
info "Done."
}
# Sometimes, weave fails to come up on some nodes.
# Symptom: the pods on a node are unreachable (they don't even ping).
# Remedy: wipe out Weave state and delete weave pod on that node.
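
A rough sketch of that remedy (the node name is an example; adjust the path if your Weave version stores its state elsewhere):

# On the affected node: wipe Weave's persisted state
sudo rm -f /var/lib/weave/weave-netdata.db
# Then delete the weave pod on that node so the DaemonSet recreates it
kubectl --namespace kube-system delete pod \
        -l name=weave-net --field-selector spec.nodeName=node2
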
@@ -793,12 +615,11 @@ _cmd_webssh() {
sudo apt-get update &&
sudo apt-get install python-tornado python-paramiko -y"
pssh "
cd /opt
[ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"
[ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
pssh "
for KEYFILE in /etc/ssh/*.pub; do
read a b c < \$KEYFILE; echo localhost \$a \$b
done | sudo tee /opt/webssh/known_hosts"
done > webssh/known_hosts"
pssh "cat >webssh.service <<EOF
[Unit]
Description=webssh
@@ -807,7 +628,7 @@ Description=webssh
WantedBy=multi-user.target
[Service]
WorkingDirectory=/opt/webssh
WorkingDirectory=/home/ubuntu/webssh
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
@@ -830,6 +651,11 @@ _cmd_www() {
python3 -m http.server
}
greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}
pull_tag() {
# Pre-pull a bunch of images
pssh --timeout 900 'for I in \
@@ -897,7 +723,10 @@ test_vm() {
"ls -la /home/docker/.ssh"; do
sep "$cmd"
echo "$cmd" \
| ssh -A $SSHOPTS $user@$ip sudo -u docker -i \
| ssh -A -q \
-o "UserKnownHostsFile /dev/null" \
-o "StrictHostKeyChecking=no" \
$user@$ip sudo -u docker -i \
|| {
status=$?
error "$cmd exit status: $status"
@@ -916,3 +745,27 @@ make_key_name() {
SHORT_FINGERPRINT=$(ssh-add -l | grep RSA | head -n1 | cut -d " " -f 2 | tr -d : | cut -c 1-8)
echo "${SHORT_FINGERPRINT}-${USER}"
}
sync_keys() {
# make sure ssh-add -l contains "RSA"
ssh-add -l | grep -q RSA \
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
AWS_KEY_NAME=$(make_key_name)
info "Syncing keys... "
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
--public-key-material "$(ssh-add -L \
| grep -i RSA \
| head -n1 \
| cut -d " " -f 1-2)" &>/dev/null
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
else
info "Imported new key $AWS_KEY_NAME."
fi
else
info "Using existing key $AWS_KEY_NAME."
fi
}

View File

@@ -1,14 +1,9 @@
if ! command -v aws >/dev/null; then
warning "AWS CLI (aws) not found."
fi
infra_list() {
aws ec2 describe-instances --output json |
jq -r '.Reservations[].Instances[] | [.InstanceId, .ClientToken, .State.Name, .InstanceType ] | @tsv'
aws_display_tags
}
infra_quotas() {
aws_greet
greet
max_instances=$(aws ec2 describe-account-attributes \
--attribute-names max-instances \
@@ -26,10 +21,10 @@ infra_start() {
COUNT=$1
# Print our AWS username, to ease the pain of credential-juggling
aws_greet
greet
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
key_name=$(aws_sync_keys)
key_name=$(sync_keys)
AMI=$(aws_get_ami) # Retrieve the AWS image ID
if [ -z "$AMI" ]; then
@@ -66,7 +61,7 @@ infra_start() {
aws_tag_instances $TAG $TAG
# Wait until EC2 API tells us that the instances are running
aws_wait_until_tag_is_running $TAG $COUNT
wait_until_tag_is_running $TAG $COUNT
aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
}
@@ -103,7 +98,7 @@ infra_disableaddrchecks() {
done
}
aws_wait_until_tag_is_running() {
wait_until_tag_is_running() {
max_retry=100
i=0
done_count=0
@@ -217,34 +212,5 @@ aws_tag_instances() {
aws_get_ami() {
##VERSION##
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a ${AWS_ARCHITECTURE-amd64} -v 18.04 -t hvm:ebs -N -q
}
aws_greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}
aws_sync_keys() {
# make sure ssh-add -l contains "RSA"
ssh-add -l | grep -q RSA \
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
AWS_KEY_NAME=$(make_key_name)
info "Syncing keys... "
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
--public-key-material "$(ssh-add -L \
| grep -i RSA \
| head -n1 \
| cut -d " " -f 1-2)" &>/dev/null
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
else
info "Imported new key $AWS_KEY_NAME."
fi
else
info "Using existing key $AWS_KEY_NAME."
fi
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
}

View File

@@ -1,57 +0,0 @@
if ! command -v hcloud >/dev/null; then
warning "Hetzner CLI (hcloud) not found."
fi
if ! [ -f ~/.config/hcloud/cli.toml ]; then
warning "~/.config/hcloud/cli.toml not found."
fi
infra_list() {
[ "$(hcloud server list -o json)" = "null" ] && return
hcloud server list -o json |
jq -r '.[] | [.id, .name , .status, .server_type.name] | @tsv'
}
infra_start() {
COUNT=$1
HETZNER_INSTANCE_TYPE=${HETZNER_INSTANCE_TYPE-cx21}
HETZNER_DATACENTER=${HETZNER_DATACENTER-nbg1-dc3}
HETZNER_IMAGE=${HETZNER_IMAGE-168855}
for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Datacenter: $HETZNER_DATACENTER"
info " Name: $NAME"
info " Instance type: $HETZNER_INSTANCE_TYPE"
hcloud server create \
--type=${HETZNER_INSTANCE_TYPE} \
--datacenter=${HETZNER_DATACENTER} \
--image=${HETZNER_IMAGE} \
--name=$NAME \
--label=tag=$TAG \
--ssh-key ~/.ssh/id_rsa.pub
done
hetzner_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}
infra_stop() {
for ID in $(hetzner_get_ids_by_tag $TAG); do
info "Scheduling deletion of instance $ID..."
hcloud server delete $ID &
done
info "Waiting for deletion to complete..."
wait
}
hetzner_get_ids_by_tag() {
TAG=$1
hcloud server list --selector=tag=$TAG -o json | jq -r .[].name
}
hetzner_get_ips_by_tag() {
TAG=$1
hcloud server list --selector=tag=$TAG -o json | jq -r .[].public_net.ipv4.ip
}

View File

@@ -1,58 +0,0 @@
if ! command -v linode-cli >/dev/null; then
warning "Linode CLI (linode-cli) not found."
fi
if ! [ -f ~/.config/linode-cli ]; then
warning "~/.config/linode-cli not found."
fi
# To view available regions: "linode-cli regions list"
LINODE_REGION=${LINODE_REGION-us-west}
# To view available types: "linode-cli linodes types"
LINODE_TYPE=${LINODE_TYPE-g6-standard-2}
infra_list() {
linode-cli linodes list --json |
jq -r '.[] | [.id, .label, .status, .type] | @tsv'
}
infra_start() {
COUNT=$1
for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Zone: $LINODE_REGION"
info " Name: $NAME"
info " Instance type: $LINODE_TYPE"
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
linode-cli linodes create \
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
--image=linode/ubuntu18.04 \
--authorized_keys="${LINODE_SSHKEY}" \
--root_pass="${ROOT_PASS}" \
--tags=${TAG} --label=${NAME}
done
sep
linode_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}
infra_stop() {
info "Counting instances..."
linode_get_ids_by_tag $TAG | wc -l
info "Deleting instances..."
linode_get_ids_by_tag $TAG |
xargs -n1 -P10 \
linode-cli linodes delete
}
linode_get_ids_by_tag() {
TAG=$1
linode-cli linodes list --tags $TAG --json | jq -r ".[].id"
}
linode_get_ips_by_tag() {
TAG=$1
linode-cli linodes list --tags $TAG --json | jq -r ".[].ipv4[0]"
}

View File

@@ -1,53 +0,0 @@
infra_list() {
openstack server list -f json |
jq -r '.[] | [.ID, .Name , .Status, .Flavor] | @tsv'
}
infra_start() {
COUNT=$1
sep "Starting $COUNT instances"
info " Region: $OS_REGION_NAME"
info " User: $OS_USERNAME"
info " Flavor: $OS_FLAVOR"
info " Image: $OS_IMAGE"
openstack server create \
--flavor $OS_FLAVOR \
--image $OS_IMAGE \
--key-name $OS_KEY \
--min $COUNT --max $COUNT \
--property workshopctl=$TAG \
$TAG
sep "Waiting for IP addresses to be available"
GOT=0
while [ "$GOT" != "$COUNT" ]; do
echo "Got $GOT/$COUNT IP addresses."
oscli_get_ips_by_tag $TAG > tags/$TAG/ips.txt
GOT="$(wc -l < tags/$TAG/ips.txt)"
done
}
infra_stop() {
info "Counting instances..."
oscli_get_instances_json $TAG |
jq -r .[].Name |
wc -l
info "Deleting instances..."
oscli_get_instances_json $TAG |
jq -r .[].Name |
xargs -P10 -n1 openstack server delete
info "Done."
}
oscli_get_instances_json() {
TAG=$1
openstack server list -f json --name "${TAG}-[0-9]*"
}
oscli_get_ips_by_tag() {
TAG=$1
oscli_get_instances_json $TAG |
jq -r .[].Networks | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' || true
}

View File

@@ -1,28 +0,0 @@
infra_start() {
COUNT=$1
cp terraform/*.tf tags/$TAG
(
cd tags/$TAG
if ! terraform init; then
error "'terraform init' failed."
error "If it mentions the following error message:"
error "openpgp: signature made by unknown entity."
error "Then you need to upgrade Terraform to 0.11.15"
error "to upgrade its signing keys following the"
error "codecov breach."
die "Aborting."
fi
echo prefix = \"$TAG\" >> terraform.tfvars
echo count = \"$COUNT\" >> terraform.tfvars
terraform apply -auto-approve
terraform output ip_addresses > ips.txt
)
}
infra_stop() {
(
cd tags/$TAG
terraform destroy -auto-approve
)
}

View File

@@ -0,0 +1,20 @@
infra_start() {
COUNT=$1
cp terraform/*.tf tags/$TAG
(
cd tags/$TAG
terraform init
echo prefix = \"$TAG\" >> terraform.tfvars
echo count = \"$COUNT\" >> terraform.tfvars
terraform apply -auto-approve
terraform output ip_addresses > ips.txt
)
}
infra_stop() {
(
cd tags/$TAG
terraform destroy -auto-approve
)
}

View File

@@ -1,51 +0,0 @@
if ! command -v scw >/dev/null; then
warning "Scaleway CLI (scw) not found."
fi
if ! [ -f ~/.config/scw/config.yaml ]; then
warning "~/.config/scw/config.yaml not found."
fi
SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
SCW_ZONE=${SCW_ZONE-fr-par-1}
infra_list() {
scw instance server list -o json |
jq -r '.[] | [.id, .name, .state, .commercial_type] | @tsv'
}
infra_start() {
COUNT=$1
for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Zone: $SCW_ZONE"
info " Name: $NAME"
info " Instance type: $SCW_INSTANCE_TYPE"
scw instance server create \
type=${SCW_INSTANCE_TYPE} zone=${SCW_ZONE} \
image=ubuntu_bionic name=${NAME}
done
sep
scw_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}
infra_stop() {
info "Counting instances..."
scw_get_ids_by_tag $TAG | wc -l
info "Deleting instances..."
scw_get_ids_by_tag $TAG |
xargs -n1 -P10 \
scw instance server delete zone=${SCW_ZONE} force-shutdown=true with-ip=true
}
scw_get_ids_by_tag() {
TAG=$1
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].id
}
scw_get_ips_by_tag() {
TAG=$1
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].public_ip.address
}

View File

@@ -1,23 +0,0 @@
infra_disableaddrchecks() {
die "unimplemented"
}
infra_list() {
die "unimplemented"
}
infra_opensg() {
die "unimplemented"
}
infra_quotas() {
die "unimplemented"
}
infra_start() {
die "unimplemented"
}
infra_stop() {
die "unimplemented"
}

View File

@@ -37,7 +37,7 @@ def system(cmd):
td = str(t2-t1)[:5]
f.write(bold("[{}] in {}s\n".format(retcode, td)))
STEP += 1
with open(os.environ["HOME"] + "/.bash_history", "a") as f:
with open("/home/ubuntu/.bash_history", "a") as f:
f.write("{}\n".format(cmd))
if retcode != 0:
msg = "The following command failed with exit code {}:\n".format(retcode)
@@ -114,7 +114,7 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jid jq")
system("sudo apt-get -qy install git jq")
system("sudo apt-get -qy install emacs-nox joe")
#######################

View File

@@ -18,13 +18,7 @@ pssh() {
echo "[parallel-ssh] $@"
export PSSH=$(which pssh || which parallel-ssh)
case "$INFRACLASS" in
hetzner) LOGIN=root ;;
linode) LOGIN=root ;;
*) LOGIN=ubuntu ;;
esac
$PSSH -h $HOSTFILE -l $LOGIN \
$PSSH -h $HOSTFILE -l ubuntu \
--par 100 \
-O LogLevel=ERROR \
-O UserKnownHostsFile=/dev/null \

View File

@@ -1,71 +1,49 @@
#!/usr/bin/env python
"""
There are two ways to use this script:
1. Pass a file name and a tag name as a single argument.
It will load a list of domains from the given file (one per line),
and assign them to the clusters corresponding to that tag.
There should be more domains than clusters.
Example: ./map-dns.py domains.txt 2020-08-15-jp
2. Pass a domain as the 1st argument, and IP addresses then.
It will configure the domain with the listed IP addresses.
Example: ./map-dns.py open-duck.site 1.2.3.4 2.3.4.5 3.4.5.6
In both cases, the domains should be configured to use GANDI LiveDNS.
"""
import os
import requests
import sys
import yaml
# This can be tweaked if necessary.
# configurable stuff
domains_file = "../../plentydomains/domains.txt"
config_file = os.path.join(
os.environ["HOME"], ".config/gandi/config.yaml")
os.environ["HOME"], ".config/gandi/config.yaml")
tag = "test"
apiurl = "https://dns.api.gandi.net/api/v5/domains"
# inferred stuff
domains = open(domains_file).read().split()
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
ips = open(f"tags/{tag}/ips.txt").read().split()
settings_file = f"tags/{tag}/settings.yaml"
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
# Figure out if we're called for a bunch of domains, or just one.
domain_or_domain_file = sys.argv[1]
if os.path.isfile(domain_or_domain_file):
domains = open(domain_or_domain_file).read().split()
domains = [ d for d in domains if not d.startswith('#') ]
tag = sys.argv[2]
ips = open(f"tags/{tag}/ips.txt").read().split()
settings_file = f"tags/{tag}/settings.yaml"
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
else:
domains = [domain_or_domain_file]
ips = sys.argv[2:]
clustersize = len(ips)
# Now, do the work.
# now do the fucking work
while domains and ips:
domain = domains[0]
domains = domains[1:]
cluster = ips[:clustersize]
ips = ips[clustersize:]
print(f"{domain} => {cluster}")
zone = ""
node = 0
for ip in cluster:
node += 1
zone += f"@ 300 IN A {ip}\n"
zone += f"* 300 IN A {ip}\n"
zone += f"node{node} 300 IN A {ip}\n"
r = requests.put(
f"{apiurl}/{domain}/records",
headers={"x-api-key": apikey},
data=zone)
print(r.text)
domain = domains[0]
domains = domains[1:]
cluster = ips[:clustersize]
ips = ips[clustersize:]
print(f"{domain} => {cluster}")
zone = ""
node = 0
for ip in cluster:
node += 1
zone += f"@ 300 IN A {ip}\n"
zone += f"* 300 IN A {ip}\n"
zone += f"node{node} 300 IN A {ip}\n"
r = requests.put(
f"{apiurl}/{domain}/records",
headers={"x-api-key": apikey},
data=zone)
print(r.text)
#r = requests.get(
# f"{apiurl}/{domain}/records",
# headers={"x-api-key": apikey},
# )
#r = requests.get(
# f"{apiurl}/{domain}/records",
# headers={"x-api-key": apikey},
# )
if domains:
print(f"Good, we have {len(domains)} domains left.")
print(f"Good, we have {len(domains)} domains left.")
if ips:
print(f"Crap, we have {len(ips)} IP addresses left.")
print(f"Crap, we have {len(ips)} IP addresses left.")

View File

@@ -21,9 +21,3 @@ machine_version: 0.15.0
# Password used to connect with the "docker user"
docker_user_password: training
steps:
- deploy
- webssh
- tailhist
- cards

View File

@@ -20,11 +20,3 @@ machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
steps:
- deploy
- webssh
- tailhist
- kube
- kubetools
- cards
- kubetest

View File

@@ -0,0 +1,24 @@
# 3 nodes for k8s 101 workshops
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -30,13 +30,11 @@ TAG=$PREFIX-$SETTINGS
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--students $STUDENTS
--count $STUDENTS
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl disabledocker $TAG
retry 5 ./workshopctl kubebins $TAG
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
./workshopctl cards $TAG
SETTINGS=admin-kubenet
@@ -45,13 +43,11 @@ TAG=$PREFIX-$SETTINGS
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--students $STUDENTS
--count $((3*$STUDENTS))
retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
@@ -60,13 +56,11 @@ TAG=$PREFIX-$SETTINGS
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--students $STUDENTS
--count $((3*$STUDENTS))
retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
@@ -79,9 +73,8 @@ TAG=$PREFIX-$SETTINGS
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--students $STUDENTS
--count $((3*$STUDENTS))
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kube $TAG 1.19.11
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
retry 5 ./workshopctl kube $TAG 1.15.9
./workshopctl cards $TAG

View File

@@ -1,7 +1,7 @@
resource "openstack_compute_instance_v2" "machine" {
count = "${var.count}"
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
image_name = "Ubuntu 18.04.4 20200324"
image_name = "Ubuntu 16.04.5 (Xenial Xerus)"
flavor_name = "${var.flavor}"
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"

View File

@@ -15,9 +15,9 @@ for lib in lib/*.sh; do
done
DEPENDENCIES="
aws
ssh
curl
fping
jq
pssh
wkhtmltopdf

View File

@@ -1,24 +1,15 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /fireeye.yml.html 200!
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
# And this allows us to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
#/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
#/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=SWEETFEBSALEC1
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=SWEETFEBSALEC4
# Shortlink for the QRCode
/q /qrcode.html 200
# Shortlinks for next training in English and French
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
/hi5 https://enix.io/fr/services/formation/online/
# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform

View File

@@ -233,7 +233,7 @@ def setup_tmux_and_ssh():
ipaddr = "$IPADDR"
uid = os.getuid()
raise Exception(r"""
raise Exception("""
1. If you're running this directly from a node:
tmux
@@ -247,16 +247,6 @@ rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-100
3. If you cannot control a remote tmux:
tmux new-session ssh docker@{ipaddr}
4. If you are running this locally with a remote cluster, make sure your prompt has the expected format:
tmux
IPADDR=$(
kubectl get nodes -o json |
jq -r '.items[0].status.addresses[] | select(.type=="ExternalIP") | .address'
)
export PS1="\n[{ipaddr}] \u@\h:\w\n\$ "
""".format(uid=uid, ipaddr=ipaddr))
else:
logging.info("Found tmux session. Trying to acquire shell prompt.")

File diff suppressed because it is too large

View File

@@ -3,6 +3,6 @@
"version": "0.0.1",
"dependencies": {
"express": "^4.16.2",
"socket.io": "^2.4.0"
"socket.io": "^2.0.4"
}
}

View File

@@ -1,7 +1,7 @@
class: title
# Advanced Dockerfile Syntax
# Advanced Dockerfiles
![construction](images/title-advanced-dockerfiles.jpg)
@@ -12,10 +12,7 @@ class: title
We have seen simple Dockerfiles to illustrate how Docker builds
container images.
In this section, we will give a recap of the Dockerfile syntax,
and introduce advanced Dockerfile commands that we might
sometimes come across, or that we might want to use in some
specific scenarios.
In this section, we will see more Dockerfile commands.
---
@@ -423,8 +420,3 @@ ONBUILD COPY . /src
* You can't chain `ONBUILD` instructions with `ONBUILD`.
* `ONBUILD` can't be used to trigger `FROM` instructions.
???
:EN:- Advanced Dockerfile syntax
:FR:- Dockerfile niveau expert

View File

@@ -44,64 +44,6 @@ Fri Feb 20 00:28:55 UTC 2015
---
## When `^C` doesn't work...
Sometimes, `^C` won't be enough.
Why? And how can we stop the container in that case?
---
## What happens when we hit `^C`
`SIGINT` gets sent to the container, which means:
- `SIGINT` gets sent to PID 1 (default case)
- `SIGINT` gets sent to *foreground processes* when running with `-ti`
But there is a special case for PID 1: it ignores all signals!
- except `SIGKILL` and `SIGSTOP`
- except signals handled explicitly
TL,DR: there are many circumstances when `^C` won't stop the container.
---
class: extra-details
## Why is PID 1 special?
- PID 1 has some extra responsibilities:
- it starts (directly or indirectly) every other process
- when a process exits, its child processes are "reparented" under PID 1
- When PID 1 exits, everything stops:
- on a "regular" machine, it causes a kernel panic
- in a container, it kills all the processes
- We don't want PID 1 to stop accidentally
- That's why it has these extra protections
---
## How to stop these containers, then?
- Start another terminal and forget about them
(for now!)
- We'll shortly learn about `docker kill`
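
As a teaser, here is what that will look like (the container ID is a placeholder):

```bash
# From another terminal: send SIGKILL (the default signal) to the stuck container
docker kill <container_id>

# Or send a specific signal instead:
docker kill --signal=TERM <container_id>
```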
---
## Run a container in the background
Containers can be started in the background, with the `-d` flag (daemon mode):
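For instance, a quick sketch using the clock image seen earlier in this chapter:

```bash
# The container ID is printed, and the container keeps running in the background.
docker run -d jpetazzo/clock
# Check that it shows up in the list of running containers.
docker ps --last 1
```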
@@ -338,8 +280,3 @@ CONTAINER ID IMAGE ... CREATED STATUS
5c1dfd4d81f1 jpetazzo/clock ... 40 min. ago Exited (0) 40 min. ago
b13c164401fb ubuntu ... 55 min. ago Exited (130) 53 min. ago
```
???
:EN:- Foreground and background containers
:FR:- Exécution interactive ou en arrière-plan

View File

@@ -131,7 +131,7 @@ root@fcfb62f0bfde:/# figlet hello
|_| |_|\___|_|_|\___/
```
It works! 🎉
It works! .emoji[🎉]
---
@@ -167,8 +167,3 @@ Automated process = good.
In the next chapter, we will learn how to automate the build
process by writing a `Dockerfile`.
???
:EN:- Building our first images interactively
:FR:- Fabriquer nos premières images à la main

View File

@@ -89,44 +89,6 @@ To keep things simple for now: this is the directory where our Dockerfile is loc
## What happens when we build the image?
It depends if we're using BuildKit or not!
If there are lots of blue lines and the first line looks like this:
```
[+] Building 1.8s (4/6)
```
... then we're using BuildKit.
If the output is mostly black-and-white and the first line looks like this:
```
Sending build context to Docker daemon 2.048kB
```
... then we're using the "classic" or "old-style" builder.
---
## To BuildKit or Not To BuildKit
Classic builder:
- copies the whole "build context" to the Docker Engine
- linear (processes lines one after the other)
- requires a full Docker Engine
BuildKit:
- only transfers parts of the "build context" when needed
- will parallelize operations (when possible)
- can run in non-privileged containers (e.g. on Kubernetes)
---
## With the classic builder
The output of `docker build` looks like this:
.small[
@@ -169,7 +131,7 @@ Sending build context to Docker daemon 2.048 kB
* Be careful (or patient) if that directory is big and your link is slow.
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
* It tells docker to ignore specific files in the directory
@@ -199,64 +161,6 @@ Removing intermediate container e01b294dbffd
---
## With BuildKit
.small[
```bash
[+] Building 7.9s (7/7) FINISHED
=> [internal] load build definition from Dockerfile 0.0s
=> => transferring dockerfile: 98B 0.0s
=> [internal] load .dockerignore 0.0s
=> => transferring context: 2B 0.0s
=> [internal] load metadata for docker.io/library/ubuntu:latest 1.2s
=> [1/3] FROM docker.io/library/ubuntu@sha256:cf31af331f38d1d7158470e095b132acd126a7180a54f263d386 3.2s
=> => resolve docker.io/library/ubuntu@sha256:cf31af331f38d1d7158470e095b132acd126a7180a54f263d386 0.0s
=> => sha256:cf31af331f38d1d7158470e095b132acd126a7180a54f263d386da88eb681d93 1.20kB / 1.20kB 0.0s
=> => sha256:1de4c5e2d8954bf5fa9855f8b4c9d3c3b97d1d380efe19f60f3e4107a66f5cae 943B / 943B 0.0s
=> => sha256:6a98cbe39225dadebcaa04e21dbe5900ad604739b07a9fa351dd10a6ebad4c1b 3.31kB / 3.31kB 0.0s
=> => sha256:80bc30679ac1fd798f3241208c14accd6a364cb8a6224d1127dfb1577d10554f 27.14MB / 27.14MB 2.3s
=> => sha256:9bf18fab4cfbf479fa9f8409ad47e2702c63241304c2cdd4c33f2a1633c5f85e 850B / 850B 0.5s
=> => sha256:5979309c983a2adeff352538937475cf961d49c34194fa2aab142effe19ed9c1 189B / 189B 0.4s
=> => extracting sha256:80bc30679ac1fd798f3241208c14accd6a364cb8a6224d1127dfb1577d10554f 0.7s
=> => extracting sha256:9bf18fab4cfbf479fa9f8409ad47e2702c63241304c2cdd4c33f2a1633c5f85e 0.0s
=> => extracting sha256:5979309c983a2adeff352538937475cf961d49c34194fa2aab142effe19ed9c1 0.0s
=> [2/3] RUN apt-get update 2.5s
=> [3/3] RUN apt-get install figlet 0.9s
=> exporting to image 0.1s
=> => exporting layers 0.1s
=> => writing image sha256:3b8aee7b444ab775975dfba691a72d8ac24af2756e0a024e056e3858d5a23f7c 0.0s
=> => naming to docker.io/library/figlet 0.0s
```
]
---
## Understanding BuildKit output
- BuildKit transfers the Dockerfile and the *build context*
(these are the first two `[internal]` stages)
- Then it executes the steps defined in the Dockerfile
(`[1/3]`, `[2/3]`, `[3/3]`)
- Finally, it exports the result of the build
(image definition + collection of layers)
---
class: extra-details
## BuildKit plain output
- When running BuildKit in e.g. a CI pipeline, its output will be different
- We can see the same output format by using `--progress=plain`
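For example, a minimal sketch (reusing the `figlet` image tag from this chapter):

```bash
# Force BuildKit and request the line-oriented output typically seen in CI logs.
DOCKER_BUILDKIT=1 docker build --progress=plain -t figlet .
```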
---
## The caching system
If you run the same build again, it will be instantaneous. Why?
@@ -267,10 +171,10 @@ If you run the same build again, it will be instantaneous. Why?
* Docker uses the exact strings defined in your Dockerfile, so:
* `RUN apt-get install figlet cowsay`
* `RUN apt-get install figlet cowsay `
<br/> is different from
<br/> `RUN apt-get install cowsay figlet`
* `RUN apt-get update` is not re-executed when the mirrors are updated
You can force a rebuild with `docker build --no-cache ...`.
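A quick sketch to observe both behaviors (again with the `figlet` image from this chapter):

```bash
# Second build: steps are served from the cache (marked CACHED with BuildKit,
# "Using cache" with the classic builder).
docker build -t figlet .

# Ignore cached layers and re-execute every step.
docker build --no-cache -t figlet .
```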
@@ -292,7 +196,7 @@ root@91f3c974c9a1:/# figlet hello
```
Yay! 🎉
Yay! .emoji[🎉]
---
@@ -459,10 +363,3 @@ In this example, `sh -c` will still be used, but
The shell gets replaced by `figlet` when `figlet` starts execution.
This allows running processes as PID 1 without using JSON.
???
:EN:- Towards automated, reproducible builds
:EN:- Writing our first Dockerfile
:FR:- Rendre le processus automatique et reproductible
:FR:- Écrire son premier Dockerfile

View File

@@ -272,46 +272,3 @@ $ docker run -it --entrypoint bash myfiglet
root@6027e44e2955:/#
```
---
## `CMD` and `ENTRYPOINT` recap
- `docker run myimage` executes `ENTRYPOINT` + `CMD`
- `docker run myimage args` executes `ENTRYPOINT` + `args` (overriding `CMD`)
- `docker run --entrypoint prog myimage` executes `prog` (overriding both)
.small[
| Command | `ENTRYPOINT` | `CMD` | Result
|---------------------------------|--------------------|---------|-------
| `docker run figlet` | none | none | Use values from base image (`bash`)
| `docker run figlet hola` | none | none | Error (executable `hola` not found)
| `docker run figlet` | `figlet -f script` | none | `figlet -f script`
| `docker run figlet hola` | `figlet -f script` | none | `figlet -f script hola`
| `docker run figlet` | none | `figlet -f script` | `figlet -f script`
| `docker run figlet hola` | none | `figlet -f script` | Error (executable `hola` not found)
| `docker run figlet` | `figlet -f script` | `hello` | `figlet -f script hello`
| `docker run figlet hola` | `figlet -f script` | `hello` | `figlet -f script hola`
]
---
## When to use `ENTRYPOINT` vs `CMD`
`ENTRYPOINT` is great for "containerized binaries".
Example: `docker run consul --help`
(Pretend that the `docker run` part isn't there!)
`CMD` is great for images with multiple binaries.
Example: `docker run busybox ifconfig`
(It makes sense to indicate *which* program we want to run!)
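To double-check the recap table above, here is a small sketch using the `myfiglet` image built earlier in this chapter:

```bash
# ENTRYPOINT + CMD from the image:
docker run myfiglet
# ENTRYPOINT + our arguments (CMD is overridden):
docker run myfiglet hola mundo
# Override the ENTRYPOINT itself:
docker run -it --entrypoint bash myfiglet
```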
???
:EN:- CMD and ENTRYPOINT
:FR:- CMD et ENTRYPOINT

View File

@@ -1,40 +1,51 @@
# Compose for development stacks
Dockerfile = great to build *one* container image.
Dockerfiles are great to build container images.
What if we have multiple containers?
But what if we work with a complex stack made of multiple containers?
What if some of them require particular `docker run` parameters?
Eventually, we will want to write some custom scripts and automation to build, run, and connect
our containers together.
How do we connect them all together?
There is a better way: using Docker Compose.
... Compose solves these use-cases (and a few more).
In this section, you will use Compose to bootstrap a development environment.
---
## Life before Compose
## What is Docker Compose?
Before we had Compose, we would typically write custom scripts to:
Docker Compose (formerly known as `fig`) is an external tool.
- build container images,
Unlike the Docker Engine, it is written in Python. It's open source as well.
- run containers using these images,
The general idea of Compose is to enable a very simple, powerful onboarding workflow:
- connect the containers together,
- rebuild, restart, update these images and containers.
---
## Life with Compose
Compose enables a simple, powerful onboarding workflow:
1. Checkout our code.
1. Checkout your code.
2. Run `docker-compose up`.
3. Our app is up and running!
3. Your app is up and running!
---
## Compose overview
This is how you work with Compose:
* You describe a set (or stack) of containers in a YAML file called `docker-compose.yml`.
* You run `docker-compose up`.
* Compose automatically pulls images, builds containers, and starts them.
* Compose can set up links, volumes, and other Docker options for you.
* Compose can run the containers in the background, or in the foreground.
* When containers are running in the foreground, their aggregated output is shown.
Before diving in, let's see a small example of Compose in action.
---
@@ -44,61 +55,20 @@ class: pic
---
## Life after Compose
## Checking if Compose is installed
(Or: when do we need something else?)
If you are using the official training virtual machines, Compose has been
pre-installed.
- Compose is *not* an orchestrator
If you are using Docker for Mac/Windows or the Docker Toolbox, Compose comes with them.
- It isn't designed to run containers on multiple nodes
If you are on Linux (desktop or server environment), you will need to install Compose from its [release page](https://github.com/docker/compose/releases) or with `pip install docker-compose`.
(it can, however, work with Docker Swarm Mode)
You can always check that it is installed by running:
- Compose isn't ideal if we want to run containers on Kubernetes
- it uses different concepts (Compose services ≠ Kubernetes services)
- it needs a Docker Engine (although containerd support might be coming)
---
## First rodeo with Compose
1. Write Dockerfiles
2. Describe our stack of containers in a YAML file called `docker-compose.yml`
3. `docker-compose up` (or `docker-compose up -d` to run in the background)
4. Compose pulls and builds the required images, and starts the containers
5. Compose shows the combined logs of all the containers
(if running in the background, use `docker-compose logs`)
6. Hit Ctrl-C to stop the whole stack
(if running in the background, use `docker-compose stop`)
---
## Iterating
After making changes to our source code, we can:
1. `docker-compose build` to rebuild container images
2. `docker-compose up` to restart the stack with the new images
We can also combine both with `docker-compose up --build`
Compose will be smart, and only recreate the containers that have changed.
When working with interpreted languages:
- don't rebuild each time
- leverage a `volumes` section instead
```bash
$ docker-compose --version
```
---
@@ -107,37 +77,38 @@ When working with interpreted languages:
First step: clone the source code for the app we will be working on.
```bash
git clone https://github.com/jpetazzo/trainingwheels
cd trainingwheels
$ cd
$ git clone https://github.com/jpetazzo/trainingwheels
...
$ cd trainingwheels
```
Second step: start the app.
Second step: start your app.
```bash
docker-compose up
$ docker-compose up
```
Watch Compose build and run the app.
That Compose stack exposes a web server on port 8000; try connecting to it.
Watch Compose build and run your app with the correct parameters,
including linking the relevant containers together.
---
## Launching Our First Stack with Compose
We should see a web page like this:
Verify that the app is running at `http://<yourHostIP>:8000`.
![composeapp](images/composeapp.png)
Each time we reload, the counter should increase.
---
## Stopping the app
When we hit Ctrl-C, Compose tries to gracefully terminate all of the containers.
When you hit `^C`, Compose tries to gracefully terminate all of the containers.
After ten seconds (or if we press `^C` again) it will forcibly kill them.
After ten seconds (or if you press `^C` again) it will forcibly kill
them.
---
@@ -147,13 +118,13 @@ Here is the file used in the demo:
.small[
```yaml
version: "3"
version: "2"
services:
www:
build: www
ports:
- ${PORT-8000}:5000
- 8000:5000
user: nobody
environment:
DEBUG: 1
@@ -172,9 +143,9 @@ services:
A Compose file has multiple sections:
* `version` is mandatory. (Typically use "3".)
* `version` is mandatory. (We should use `"2"` or later; version 1 is deprecated.)
* `services` is mandatory. Each service corresponds to a container.
* `services` is mandatory. A service is one or more replicas of the same image running as containers.
* `networks` is optional and indicates to which networks containers should be connected.
<br/>(By default, containers will be connected on a private, per-compose-file network.)
@@ -193,8 +164,6 @@ A Compose file has multiple sections:
* Version 3 added support for deployment options (scaling, rolling updates, etc).
* Typically use `version: "3"`.
The [Docker documentation](https://docs.docker.com/compose/compose-file/)
has excellent information about the Compose file format if you need to know more about versions.
@@ -232,45 +201,34 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
---
## Environment variables
## Compose commands
- We can use environment variables in Compose files
We already saw `docker-compose up`, but another one is `docker-compose build`.
(like `$THIS` or `${THAT}`)
It will execute `docker build` for all containers mentioning a `build` path.
- We can provide default values, e.g. `${PORT-8000}`
It can also be invoked automatically when starting the application:
- Compose will also automatically load the environment file `.env`
```bash
docker-compose up --build
```
(it should contain `VAR=value`, one per line)
Another common option is to start containers in the background:
- This is a great way to customize build and run parameters
(base image versions to use, build and run secrets, port numbers...)
```bash
docker-compose up -d
```
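As a sketch of the environment variable mechanism described above (the variable name and value are just examples):

```bash
# Compose automatically loads .env (one VAR=value per line).
echo "PORT=8001" > .env
# A Compose file using ${PORT-8000} will now publish on 8001 instead of the 8000 default.
docker-compose up -d
```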
---
## Running multiple copies of a stack
## Check container status
- Copy the stack in two different directories, e.g. `front` and `frontcopy`
It can be tedious to check the status of your containers with `docker ps`,
especially when running multiple apps at the same time.
- Compose prefixes images and containers with the directory name:
Compose makes it easier; with `docker-compose ps` you will see only the status of the
containers of the current stack:
`front_www`, `front_www_1`, `front_db_1`
`frontcopy_www`, `frontcopy_www_1`, `frontcopy_db_1`
- Alternatively, use `docker-compose -p frontcopy`
(to set the `--project-name` of a stack, which defaults to the directory name)
- Each copy is isolated from the others (runs on a different network)
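For instance, a sketch reusing the `frontcopy` project name from above:

```bash
# Run a second, isolated copy of the stack without duplicating the directory:
docker-compose -p frontcopy up -d
# Only the containers of that project are listed:
docker-compose -p frontcopy ps
```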
---
## Checking stack status
We have `ps`, `docker ps`, and similarly, `docker-compose ps`:
```bash
$ docker-compose ps
@@ -280,10 +238,6 @@ trainingwheels_redis_1 /entrypoint.sh red Up 6379/tcp
trainingwheels_www_1 python counter.py Up 0.0.0.0:8000->5000/tcp
```
Shows the status of all the containers of our stack.
Doesn't show the other containers.
---
## Cleaning up (1)
@@ -327,44 +281,44 @@ Use `docker-compose down -v` to remove everything including volumes.
## Special handling of volumes
- When an image gets updated, Compose automatically creates a new container
Compose is smart. If your container uses volumes, when you restart your
application, Compose will create a new container, but carefully re-use
the volumes it was using previously.
- The data in the old container is lost...
- ... Except if the container is using a *volume*
- Compose will then re-attach that volume to the new container
(and data is then retained across database upgrades)
- All good database images use volumes
(e.g. all official images)
This makes it easy to upgrade a stateful service, by pulling its
new image and just restarting your stack with Compose.
---
class: extra-details
## Compose project name
## A bit of history and trivia
* When you run a Compose command, Compose infers the "project name" of your app.
- Compose was initially named "Fig"
* By default, the "project name" is the name of the current directory.
- Compose is one of the only components of Docker written in Python
* For instance, if you are in `/home/zelda/src/ocarina`, the project name is `ocarina`.
(almost everything else is in Go)
* All resources created by Compose are tagged with this project name.
- In 2020, Docker introduced "Compose CLI":
* The project name also appears as a prefix of the names of the resources.
- `docker compose` command to deploy Compose stacks to some clouds
E.g. in the previous example, service `www` will create a container `ocarina_www_1`.
- progressively getting feature parity with `docker-compose`
* The project name can be overridden with `docker-compose -p`.
- also provides numerous improvements (e.g. leverages BuildKit by default)
---
???
## Running two copies of the same app
:EN:- Using compose to describe an environment
:EN:- Connecting services together with a *Compose file*
If you want to run two copies of the same app simultaneously, all you have to do is to
make sure that each copy has a different project name.
:FR:- Utiliser Compose pour décrire son environnement
:FR:- Écrire un *Compose file* pour connecter les services entre eux
You can:
* copy your code in a directory with a different name
* start each copy with `docker-compose -p myprojname up`
Each copy will run in a different network, totally isolated from the other.
This is ideal to debug regressions, do side-by-side comparisons, etc.

View File

@@ -27,9 +27,9 @@ We will also explain the principle of overlay networks and network plugins.
## The Container Network Model
Docker has "networks".
The CNM was introduced in Engine 1.9.0 (November 2015).
We can manage them with the `docker network` commands; for instance:
The CNM adds the notion of a *network*, and a new top-level command to manipulate and see those networks: `docker network`.
```bash
$ docker network ls
@@ -41,79 +41,59 @@ eb0eeab782f4 host host
228a4355d548 blog-prod overlay
```
New networks can be created (with `docker network create`).
---
(Note: networks `none` and `host` are special; let's set them aside for now.)
## What's in a network?
* Conceptually, a network is a virtual switch.
* It can be local (to a single Engine) or global (spanning multiple hosts).
* A network has an IP subnet associated to it.
* Docker will allocate IP addresses to the containers connected to a network.
* Containers can be connected to multiple networks.
* Containers can be given per-network names and aliases.
* The names and aliases can be resolved via an embedded DNS server.
---
## What's a network?
## Network implementation details
- Conceptually, a Docker "network" is a virtual switch
* A network is managed by a *driver*.
(we can also think about it like a VLAN, or a WiFi SSID, for instance)
* The built-in drivers include:
- By default, containers are connected to a single network
* `bridge` (default)
(but they can be connected to zero, or many networks, even dynamically)
* `none`
- Each network has its own subnet (IP address range)
* `host`
- A network can be local (to a single Docker Engine) or global (span multiple hosts)
* `macvlan`
- Containers can have *network aliases* providing DNS-based service discovery
* A multi-host driver, *overlay*, is available out of the box (for Swarm clusters).
(and each network has its own "domain", "zone", or "scope")
* More drivers can be provided by plugins (OVS, VLAN...)
* A network can have a custom IPAM (IP allocator).
---
## Service discovery
class: extra-details
- A container can be given a network alias
## Differences with the CNI
(e.g. with `docker run --net some-network --net-alias db ...`)
* CNI = Container Network Interface
- The containers running in the same network can resolve that network alias
* CNI is used notably by Kubernetes
(i.e. if they do a DNS lookup on `db`, it will give the container's address)
* With CNI, all the nodes and containers are on a single IP network
- We can have a different `db` container in each network
(this avoids naming conflicts between different stacks)
- When we name a container, it automatically adds the name as a network alias
(i.e. `docker run --name xyz ...` is like `docker run --net-alias xyz ...`; see the sketch below)
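A minimal sketch of this mechanism (the network name, alias, and image are just examples):

```bash
docker network create some-network
docker run -d --net some-network --net-alias db redis
# Any container on the same network resolves the alias through the embedded DNS server:
docker run --net some-network alpine ping -c 1 db
```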
---
## Network isolation
- Networks are isolated
- By default, containers in network A cannot reach those in network B
- A container connected to both networks A and B can act as a router or proxy
- Published ports are always reachable through the Docker host address
(`docker run -P ...` makes a container port available to everyone)
---
## How to use networks
- We typically create one network per "stack" or app that we deploy
- More complex apps or stacks might require multiple networks
(e.g. `frontend`, `backend`, ...)
- Networks allow us to deploy multiple copies of the same stack
(e.g. `prod`, `dev`, `pr-442`, ....)
- If we use Docker Compose, this is managed automatically for us
* Both CNI and CNM offer the same functionality, but with very different methods
---
@@ -141,30 +121,6 @@ class: pic
---
class: extra-details
## CNM vs CNI
- CNM is the model used by Docker
- Kubernetes uses a different model, architectured around CNI
(CNI is a kind of API between a container engine and *CNI plugins*)
- Docker model:
- multiple isolated networks
- per-network service discovery
- network interconnection requires extra steps
- Kubernetes model:
- single flat network
- per-namespace service discovery
- network isolation requires extra steps (Network Policies)
---
## Creating a network
Let's create a network called `dev`.
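As a sketch, with the default driver and IP allocator:

```bash
docker network create dev
# It now shows up next to the built-in bridge/host/none networks.
docker network ls
```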
@@ -234,12 +190,8 @@ class: extra-details
## Resolving container addresses
Since Docker Engine 1.10, name resolution is implemented by a dynamic resolver.
Archeological note: when CNM was introduced (in Docker Engine 1.9, November 2015)
name resolution was implemented with `/etc/hosts`, and it was updated each time
containers were added/removed. This could cause interesting race conditions
since `/etc/hosts` was a bind-mount (and couldn't be updated atomically).
In Docker Engine 1.9, name resolution is implemented with `/etc/hosts`, and
updating it each time containers are added/removed.
.small[
```bash
@@ -256,6 +208,10 @@ ff02::2 ip6-allrouters
```
]
In Docker Engine 1.10, this has been replaced by a dynamic resolver.
(This avoids race conditions when updating `/etc/hosts`.)
---
# Service discovery with containers
@@ -309,12 +265,12 @@ Note: we're not using a FQDN or an IP address here; just `redis`.
* That container must be on the same network as the web server.
* It must have the right network alias (`redis`) so the application can find it.
* It must have the right name (`redis`) so the application can find it.
Start the container:
```bash
$ docker run --net dev --net-alias redis -d redis
$ docker run --net dev --name redis -d redis
```
---
@@ -331,19 +287,34 @@ $ docker run --net dev --net-alias redis -d redis
## A few words on *scope*
- Container names are unique (there can be only one `--name redis`)
* What if we want to run multiple copies of our application?
- Network aliases are not unique
* Since names are unique, there can be only one container named `redis` at a time.
- We can have the same network alias in different networks:
```bash
docker run --net dev --net-alias redis ...
docker run --net prod --net-alias redis ...
```
* However, we can specify the network name of our container with `--net-alias`.
- We can even have multiple containers with the same alias in the same network
* `--net-alias` is scoped per network, and independent from the container name.
(in that case, we get multiple DNS entries, aka "DNS round robin")
---
class: extra-details
## Using a network alias instead of a name
Let's remove the `redis` container:
```bash
$ docker rm -f redis
```
And create one that doesn't block the `redis` name:
```bash
$ docker run --net dev --net-alias redis -d redis
```
Check that the app still works (but the counter is back to 1,
since we wiped out the old Redis container).
---
@@ -376,9 +347,7 @@ A container can have multiple network aliases.
Network aliases are *local* to a given network (only exist in this network).
Multiple containers can have the same network alias (even on the same network).
Since Docker Engine 1.11, resolving a network alias yields the IP addresses of all containers holding this alias.
Multiple containers can have the same network alias (even on the same network). In Docker Engine 1.11, resolving a network alias yields the IP addresses of all containers holding this alias.
---
@@ -531,24 +500,6 @@ b2887adeb5578a01fd9c55c435cad56bbbe802350711d2743691f95743680b09
---
## Network drivers
* A network is managed by a *driver*.
* The built-in drivers include:
* `bridge` (default)
* `none`
* `host`
* `macvlan`
* `overlay` (for Swarm clusters)
* More drivers can be provided by plugins (OVS, VLAN...)
* A network can have a custom IPAM (IP allocator).
---
## Overlay networks
* The features we've seen so far only work when all containers are on a single host.
@@ -789,15 +740,3 @@ class: extra-details
* This may be used to access an internal package repository.
(But try to use a multi-stage build instead, if possible!)
???
:EN:Container networking essentials
:EN:- The Container Network Model
:EN:- Container isolation
:EN:- Service discovery
:FR:Mettre ses conteneurs en réseau
:FR:- Le "Container Network Model"
:FR:- Isolation des conteneurs
:FR:- *Service discovery*

View File

@@ -15,84 +15,53 @@ At the end of this section, you will be able to:
* Run a network service in a container.
* Connect to that network service.
* Manipulate container networking basics.
* Find a container's IP address.
---
## Running a very simple service
- We need something small, simple, easy to configure
(or, even better, that doesn't require any configuration at all)
- Let's use the official NGINX image (named `nginx`)
- It runs a static web server listening on port 80
- It serves a default "Welcome to nginx!" page
We will also explain the different network models used by Docker.
---
## Running an NGINX server
## A simple, static web server
Run the Docker Hub image `nginx`, which contains a basic web server:
```bash
$ docker run -d -P nginx
66b1ce719198711292c8f34f84a7b68c3876cf9f67015e752b94e189d35a204e
```
- Docker will automatically pull the `nginx` image from the Docker Hub
* Docker will download the image from the Docker Hub.
- `-d` / `--detach` tells Docker to run it in the background
* `-d` tells Docker to run the image in the background.
- `-P` / `--publish-all` tells Docker to publish all ports
* `-P` tells Docker to make this service reachable from other computers.
<br/>(`-P` is the short version of `--publish-all`.)
(publish = make them reachable from other computers)
- ...OK, how do we connect to our web server now?
But, how do we connect to our web server now?
---
## Finding our web server port
- First, we need to find the *port number* used by Docker
We will use `docker ps`:
(the NGINX container listens on port 80, but this port will be *mapped*)
```bash
$ docker ps
CONTAINER ID IMAGE ... PORTS ...
e40ffb406c9e nginx ... 0.0.0.0:32768->80/tcp ...
```
- We can use `docker ps`:
```bash
$ docker ps
CONTAINER ID IMAGE ... PORTS ...
e40ffb406c9e nginx ... 0.0.0.0:`12345`->80/tcp ...
```
- This means:
* The web server is running on port 80 inside the container.
*port 12345 on the Docker host is mapped to port 80 in the container*
* This port is mapped to port 32768 on our Docker host.
- Now we need to connect to the Docker host!
We will explain the whys and hows of this port mapping.
---
But first, let's make sure that everything works properly.
## Finding the address of the Docker host
- When running Docker on your Linux workstation:
*use `localhost`, or any IP address of your machine*
- When running Docker on a remote Linux server:
*use any IP address of the remote machine*
- When running Docker Desktop on Mac or Windows:
*use `localhost`*
- In other scenarios (`docker-machine`, local VM...):
*use the IP address of the Docker VM*
---
## Connecting to our web server (GUI)
@@ -112,7 +81,7 @@ Make sure to use the right port number if it is different
from the example below:
```bash
$ curl localhost:12345
$ curl localhost:32768
<!DOCTYPE html>
<html>
<head>
@@ -147,41 +116,17 @@ IMAGE CREATED CREATED BY
---
## Why can't we just connect to port 80?
## Why are we mapping ports?
- Our Docker host has only one port 80
* We are out of IPv4 addresses.
- Therefore, we can only have one container at a time on port 80
* Containers cannot have public IPv4 addresses.
- Therefore, if multiple containers want port 80, only one can get it
* They have private addresses.
- By default, containers *do not* get "their" port number, but a random one
* Services have to be exposed port by port.
(not "random" as "crypto random", but as "it depends on various factors")
- We'll see later how to force a port number (including port 80!)
---
class: extra-details
## Using multiple IP addresses
*Hey, my network-fu is strong, and I have questions...*
- Can I publish one container on 127.0.0.2:80, and another on 127.0.0.3:80?
- My machine has multiple (public) IP addresses, let's say A.A.A.A and B.B.B.B.
<br/>
Can I have one container on A.A.A.A:80 and another on B.B.B.B:80?
- I have a whole IPV4 subnet, can I allocate it to my containers?
- What about IPV6?
You can do all these things when running Docker directly on Linux.
(On other platforms, *generally not*, but there are some exceptions.)
* Ports have to be mapped to avoid conflicts.
---
@@ -193,7 +138,7 @@ There is a command to help us:
```bash
$ docker port <containerID> 80
0.0.0.0:12345
32768
```
---
@@ -227,11 +172,13 @@ There are many ways to integrate containers in your network.
* Pick a fixed port number in advance, when you generate your configuration.
<br/>Then start your container by setting the port numbers manually.
* Use an orchestrator like Kubernetes or Swarm.
<br/>The orchestrator will provide its own networking facilities.
* Use a network plugin, connecting your containers with e.g. VLANs, tunnels...
Orchestrators typically provide mechanisms to enable direct container-to-container
communication across hosts, and publishing/load balancing for inbound traffic.
* Enable *Swarm Mode* to deploy across a cluster.
<br/>The container will then be reachable through any node of the cluster.
When using Docker through an extra management layer like Mesos or Kubernetes,
these will usually provide their own mechanism to expose containers.
---
@@ -255,34 +202,16 @@ $ docker inspect --format '{{ .NetworkSettings.IPAddress }}' <yourContainerID>
## Pinging our container
Let's try to ping our container *from another container.*
We can test connectivity to the container using the IP address we've
just discovered. Let's see this now by using the `ping` tool.
```bash
docker run alpine ping `<ipaddress>`
PING 172.17.0.X (172.17.0.X): 56 data bytes
64 bytes from 172.17.0.X: seq=0 ttl=64 time=0.106 ms
64 bytes from 172.17.0.X: seq=1 ttl=64 time=0.250 ms
64 bytes from 172.17.0.X: seq=2 ttl=64 time=0.188 ms
$ ping <ipAddress>
64 bytes from <ipAddress>: icmp_req=1 ttl=64 time=0.085 ms
64 bytes from <ipAddress>: icmp_req=2 ttl=64 time=0.085 ms
64 bytes from <ipAddress>: icmp_req=3 ttl=64 time=0.085 ms
```
When running on Linux, we can even ping that IP address directly!
(And connect to a container's ports even if they aren't published.)
---
## How often do we use `-p` and `-P` ?
- When running a stack of containers, we will often use Compose
- Compose will take care of exposing containers
(through a `ports:` section in the `docker-compose.yml` file)
- It is, however, fairly common to use `docker run -P` for a quick test
- Or `docker run -p ...` when an image doesn't `EXPOSE` a port correctly
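For example (a sketch; the host port number is arbitrary):

```bash
# Publish all EXPOSEd ports on random host ports:
docker run -d -P nginx
# Publish container port 80 on a host port that we choose:
docker run -d -p 8080:80 nginx
```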
---
## Section summary
@@ -291,11 +220,9 @@ We've learned how to:
* Expose a network port.
* Connect to an application running in a container.
* Manipulate container networking basics.
* Find a container's IP address.
???
:EN:- Exposing single containers
:FR:- Exposer un conteneur isolé
In the next chapter, we will see how to connect
containers together without exposing their ports.

View File

@@ -88,45 +88,13 @@ Success!
## Details
* We can `COPY` whole directories recursively
* You can `COPY` whole directories recursively.
* It is possible to do e.g. `COPY . .`
(but it might require some extra precautions to avoid copying too much)
* In older Dockerfiles, you might see the `ADD` command; consider it deprecated
(it is similar to `COPY` but can automatically extract archives)
* Older Dockerfiles also have the `ADD` instruction.
<br/>It is similar but can automatically extract archives.
* If we really wanted to compile C code in a container, we would:
* place it in a different directory, with the `WORKDIR` instruction
* Place it in a different directory, with the `WORKDIR` instruction.
* even better, use the `gcc` official image
---
class: extra-details
## `.dockerignore`
- We can create a file named `.dockerignore`
(at the top-level of the build context)
- It can contain file names and globs to ignore
- They won't be sent to the builder
(and won't end up in the resulting image)
- See the [documentation] for the little details
(exceptions can be made with `!`, multiple directory levels with `**`...)
[documentation]: https://docs.docker.com/engine/reference/builder/#dockerignore-file
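A small sketch of what such a file could contain (the patterns are just examples):

```bash
# At the top-level of the build context:
cat > .dockerignore <<'EOF'
.git
*.log
node_modules
# exception: re-include this file even though *.log is ignored
!important.log
EOF
```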
???
:EN:- Leveraging the build cache for faster builds
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*
* Even better, use the `gcc` official image.

View File

@@ -66,9 +66,9 @@ Adding the dependencies as a separate step means that Docker can cache more effi
```bash
FROM python
COPY requirements.txt /tmp/requirements.txt
RUN pip install -qr /tmp/requirements.txt
WORKDIR /src
COPY requirements.txt .
RUN pip install -qr requirements.txt
COPY . .
EXPOSE 5000
CMD ["python", "app.py"]
@@ -424,22 +424,10 @@ services:
- In this chapter, we showed many ways to write Dockerfiles.
- These Dockerfiles sometimes use diametrically opposed techniques.
- These Dockerfiles use sometimes diametrally opposed techniques.
- Yet, they were the "right" ones *for a specific situation.*
- It's OK (and even encouraged) to start simple and evolve as needed.
- Feel free to review this chapter later (after writing a few Dockerfiles) for inspiration!
???
:EN:Optimizing images
:EN:- Dockerfile tips, tricks, and best practices
:EN:- Reducing build time
:EN:- Reducing image size
:FR:Optimiser ses images
:FR:- Bonnes pratiques, trucs et astuces
:FR:- Réduire le temps de build
:FR:- Réduire la taille des images

View File

@@ -2,99 +2,4 @@
Let's write Dockerfiles for an existing application!
1. Check out the code repository
2. Read all the instructions
3. Write Dockerfiles
4. Build and test them individually
<!--
5. Test them together with the provided Compose file
-->
---
## Code repository
Clone the repository available at:
https://github.com/jpetazzo/wordsmith
It should look like this:
```
├── LICENSE
├── README
├── db/
│ └── words.sql
├── web/
│ ├── dispatcher.go
│ └── static/
└── words/
├── pom.xml
└── src/
```
---
## Instructions
The repository contains instructions in English and French.
<br/>
For now, we only care about the first part (about writing Dockerfiles).
<br/>
Place each Dockerfile in its own directory, like this:
```
├── LICENSE
├── README
├── db/
│ ├── `Dockerfile`
│ └── words.sql
├── web/
│ ├── `Dockerfile`
│ ├── dispatcher.go
│ └── static/
└── words/
├── `Dockerfile`
├── pom.xml
└── src/
```
---
## Build and test
Build and run each Dockerfile individually.
For `db`, we should be able to see some messages confirming that the data set
was loaded successfully (some `INSERT` lines in the container output).
For `web` and `words`, we should be able to see some message looking like
"server started successfully".
That's all we care about for now!
Bonus question: make sure that each container stops correctly when hitting Ctrl-C.
???
## Test with a Compose file
Place the following Compose file at the root of the repository:
```yaml
version: "3"
services:
db:
build: db
words:
build: words
web:
build: web
ports:
- 8888:80
```
Test the whole app by bringing up the stack and connecting to port 8888.
The code is at: https://github.com/jpetazzo/wordsmith

View File

@@ -106,7 +106,7 @@ root@04c0bb0a6c07:/# figlet hello
|_| |_|\___|_|_|\___/
```
Beautiful! 😍
Beautiful! .emoji[😍]
---
@@ -118,7 +118,7 @@ Let's check how many packages are installed there.
```bash
root@04c0bb0a6c07:/# dpkg -l | wc -l
97
190
```
* `dpkg -l` lists the packages installed in our container
@@ -175,7 +175,7 @@ Now try to run `figlet`. Does that work?
* We can run *any container* on *any host*.
(One exception: Windows containers can only run on Windows hosts; at least for now.)
(One exception: Windows containers cannot run on Linux machines; at least not yet.)
---
@@ -290,8 +290,3 @@ bash: figlet: command not found
* We have a clear definition of our environment, and can share it reliably with others.
* Let's see in the next chapters how to bake a custom image with `figlet`!
???
:EN:- Running our first container
:FR:- Lancer nos premiers conteneurs

View File

@@ -226,8 +226,3 @@ docker export <container_id> | tar tv
```
This will give a detailed listing of the content of the container.
???
:EN:- Troubleshooting and getting inside a container
:FR:- Inspecter un conteneur en détail, en *live* ou *post-mortem*

View File

@@ -56,8 +56,6 @@ Each of the following items will correspond to one layer:
* Our application code and assets
* Our application configuration
(Note: app config is generally added by orchestration facilities.)
---
class: pic
@@ -369,44 +367,6 @@ This is similar to what we would do with `pip install`, `npm install`, etc.
---
class: extra-details
## Multi-arch images
- An image can support multiple architectures
- More precisely, a specific *tag* in a given *repository* can have either:
- a single *manifest* referencing an image for a single architecture
- a *manifest list* (or *fat manifest*) referencing multiple images
- In a *manifest list*, each image is identified by a combination of:
- `os` (linux, windows)
- `architecture` (amd64, arm, arm64...)
- optional fields like `variant` (for arm and arm64), `os.version` (for windows)
---
class: extra-details
## Working with multi-arch images
- The Docker Engine will pull "native" images when available
(images matching its own os/architecture/variant)
- We can ask for a specific image platform with `--platform`
- The Docker Engine can run non-native images thanks to QEMU+binfmt
(automatically on Docker Desktop; with a bit of setup on Linux)
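For instance (a sketch; running a non-native platform requires the QEMU+binfmt setup mentioned above):

```bash
# Pull and run the arm64 variant of a multi-arch image:
docker pull --platform linux/arm64 ubuntu
docker run --rm --platform linux/arm64 ubuntu uname -m
```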
---
## Section summary
We've learned how to:
@@ -415,13 +375,3 @@ We've learned how to:
* Understand Docker image namespacing.
* Search and download images.
???
:EN:Building images
:EN:- Containers, images, and layers
:EN:- Image addresses and tags
:EN:- Finding and transferring images
:FR:Construire des images
:FR:- La différence entre un conteneur et une image
:FR:- La notion de *layer* partagé entre images

Some files were not shown because too many files have changed in this diff.