diff --git a/k8s/efk.yaml b/k8s/efk.yaml new file mode 100644 index 00000000..d2bcdbda --- /dev/null +++ b/k8s/efk.yaml @@ -0,0 +1,222 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: fluentd + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: fluentd +rules: +- apiGroups: + - "" + resources: + - pods + - namespaces + verbs: + - get + - list + - watch + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: fluentd +roleRef: + kind: ClusterRole + name: fluentd + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: fluentd + namespace: default + +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: fluentd + labels: + k8s-app: fluentd-logging + version: v1 + kubernetes.io/cluster-service: "true" +spec: + template: + metadata: + labels: + k8s-app: fluentd-logging + version: v1 + kubernetes.io/cluster-service: "true" + spec: + serviceAccount: fluentd + serviceAccountName: fluentd + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: fluentd + image: fluent/fluentd-kubernetes-daemonset:elasticsearch + env: + - name: FLUENT_ELASTICSEARCH_HOST + value: "elasticsearch" + - name: FLUENT_ELASTICSEARCH_PORT + value: "9200" + - name: FLUENT_ELASTICSEARCH_SCHEME + value: "http" + # X-Pack Authentication + # ===================== + - name: FLUENT_ELASTICSEARCH_USER + value: "elastic" + - name: FLUENT_ELASTICSEARCH_PASSWORD + value: "changeme" + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + +--- +apiVersion: extensions/v1beta1 +kind: Deployment 
+metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: null + generation: 1 + labels: + run: elasticsearch + name: elasticsearch + selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + run: elasticsearch + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + run: elasticsearch + spec: + containers: + - image: elasticsearch:5.6.8 + imagePullPolicy: IfNotPresent + name: elasticsearch + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + run: elasticsearch + name: elasticsearch + selfLink: /api/v1/namespaces/default/services/elasticsearch +spec: + ports: + - port: 9200 + protocol: TCP + targetPort: 9200 + selector: + run: elasticsearch + sessionAffinity: None + type: ClusterIP + +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + creationTimestamp: null + generation: 1 + labels: + run: kibana + name: kibana + selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + run: kibana + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + run: kibana + spec: + containers: + - env: + - name: ELASTICSEARCH_URL + value: http://elasticsearch:9200/ + image: kibana:5.6.8 + imagePullPolicy: Always + name: kibana + resources: {} + terminationMessagePath: /dev/termination-log + 
terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + run: kibana + name: kibana + selfLink: /api/v1/namespaces/default/services/kibana +spec: + externalTrafficPolicy: Cluster + ports: + - port: 5601 + protocol: TCP + targetPort: 5601 + selector: + run: kibana + sessionAffinity: None + type: NodePort diff --git a/k8s/grant-admin-to-dashboard.yaml b/k8s/grant-admin-to-dashboard.yaml new file mode 100644 index 00000000..59daae9e --- /dev/null +++ b/k8s/grant-admin-to-dashboard.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard + labels: + k8s-app: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system \ No newline at end of file diff --git a/k8s/kubernetes-dashboard.yaml b/k8s/kubernetes-dashboard.yaml new file mode 100644 index 00000000..73fcc239 --- /dev/null +++ b/k8s/kubernetes-dashboard.yaml @@ -0,0 +1,167 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration to deploy release version of the Dashboard UI compatible with +# Kubernetes 1.8. 
+# +# Example usage: kubectl create -f + +# ------------------- Dashboard Secret ------------------- # + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- +# ------------------- Dashboard Service Account ------------------- # + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- +# ------------------- Dashboard Role & Role Binding ------------------- # + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubernetes-dashboard-minimal + namespace: kube-system +rules: + # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] + # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics from heapster. 
+- apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster"] + verbs: ["proxy"] +- apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:"] + verbs: ["get"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kubernetes-dashboard-minimal + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard-minimal +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- +# ------------------- Dashboard Deployment ------------------- # + +kind: Deployment +apiVersion: apps/v1beta2 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- +# ------------------- Dashboard Service ------------------- # + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard diff --git a/k8s/netpol-allow-testcurl-for-testweb.yaml b/k8s/netpol-allow-testcurl-for-testweb.yaml new file mode 100644 index 00000000..c0a73f13 --- /dev/null +++ b/k8s/netpol-allow-testcurl-for-testweb.yaml @@ -0,0 +1,14 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-testcurl-for-testweb +spec: + podSelector: + matchLabels: + run: testweb + ingress: + - from: + - podSelector: + matchLabels: + run: testcurl + diff --git a/k8s/netpol-deny-all-for-testweb.yaml b/k8s/netpol-deny-all-for-testweb.yaml new file mode 100644 index 00000000..e975991c --- /dev/null +++ b/k8s/netpol-deny-all-for-testweb.yaml @@ -0,0 +1,10 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: deny-all-for-testweb +spec: + podSelector: + matchLabels: + run: testweb + ingress: [] + diff --git a/k8s/socat.yaml b/k8s/socat.yaml new file mode 100644 index 00000000..5d4b2ca5 --- /dev/null +++ b/k8s/socat.yaml @@ -0,0 +1,67 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + 
deployment.kubernetes.io/revision: "2" + creationTimestamp: null + generation: 1 + labels: + run: socat + name: socat + namespace: kube-system + selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat +spec: + replicas: 1 + selector: + matchLabels: + run: socat + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + run: socat + spec: + containers: + - args: + - sh + - -c + - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0 + image: alpine + imagePullPolicy: Always + name: socat + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + run: socat + name: socat + namespace: kube-system + selfLink: /api/v1/namespaces/kube-system/services/socat +spec: + externalTrafficPolicy: Cluster + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + run: socat + sessionAffinity: None + type: NodePort +status: + loadBalancer: {} diff --git a/prepare-vms/lib/commands.sh b/prepare-vms/lib/commands.sh index d88e7a38..b7a40a24 100644 --- a/prepare-vms/lib/commands.sh +++ b/prepare-vms/lib/commands.sh @@ -168,6 +168,22 @@ _cmd_kube() { sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443 fi" + # Install stern + pssh " + if [ ! -x /usr/local/bin/stern ]; then + sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64 + sudo chmod +x /usr/local/bin/stern + stern --completion bash | sudo tee /etc/bash_completion.d/stern + fi" + + # Install helm + pssh " + if [ ! 
-x /usr/local/bin/helm ]; then + curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash + helm completion bash | sudo tee /etc/bash_completion.d/helm + fi" + + sep "Done" } diff --git a/slides/index.yaml b/slides/index.yaml index 269ff23f..55ad0437 100644 --- a/slides/index.yaml +++ b/slides/index.yaml @@ -1,3 +1,11 @@ +- date: 2018-11-23 + city: Copenhagen + country: dk + event: GOTO + title: Build Container Orchestration with Docker Swarm + speaker: bretfisher + attend: https://gotocph.com/2018/workshops/121 + - date: 2018-11-08 city: San Francisco, CA country: us @@ -22,6 +30,14 @@ speaker: bridgetkromhout attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71149 +- date: 2018-10-30 + city: London, UK + country: uk + event: Velocity EU + title: "Docker Zero to Hero: Docker, Compose and Production Swarm" + speaker: bretfisher + attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71231 + - date: 2018-07-12 city: Minneapolis, MN country: us @@ -47,6 +63,14 @@ speaker: jpetazzo attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/69875 +- date: 2018-09-30 + city: New York, NY + country: us + event: Velocity + title: "Docker Zero to Hero: Docker, Compose and Production Swarm" + speaker: bretfisher + attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/70147 + - date: 2018-09-17 country: fr city: Paris diff --git a/slides/k8s/dashboard.md b/slides/k8s/dashboard.md index 21cf314b..cdef44d2 100644 --- a/slides/k8s/dashboard.md +++ b/slides/k8s/dashboard.md @@ -32,15 +32,11 @@ There is an additional step to make the dashboard available from outside (we'll - Create all the dashboard resources, with the following command: ```bash - kubectl apply -f https://goo.gl/Qamqab + kubectl apply -f ~/container.training/k8s/kubernetes-dashboard.yaml ``` ] -The goo.gl URL expands to: -
-.small[https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml] - --- @@ -72,15 +68,11 @@ The goo.gl URL expands to: - Apply the convenient YAML file, and defeat SSL protection: ```bash - kubectl apply -f https://goo.gl/tA7GLz + kubectl apply -f ~/container.training/k8s/socat.yaml ``` ] -The goo.gl URL expands to: -
-.small[.small[https://gist.githubusercontent.com/jpetazzo/c53a28b5b7fdae88bc3c5f0945552c04/raw/da13ef1bdd38cc0e90b7a4074be8d6a0215e1a65/socat.yaml]] - .warning[All our dashboard traffic is now clear-text, including passwords!] --- @@ -135,7 +127,7 @@ The dashboard will then ask you which authentication you want to use. - Grant admin privileges to the dashboard so we can see our resources: ```bash - kubectl apply -f https://goo.gl/CHsLTA + kubectl apply -f ~/container.training/k8s/grant-admin-to-dashboard.yaml ``` - Reload the dashboard and enjoy! @@ -175,7 +167,7 @@ The dashboard will then ask you which authentication you want to use. ## Editing the `kubernetes-dashboard` service -- If we look at the [YAML](https://goo.gl/Qamqab) that we loaded before, we'll get a hint +- If we look at the [YAML](https://github.com/jpetazzo/container.training/blob/master/k8s/kubernetes-dashboard.yaml) that we loaded before, we'll get a hint -- diff --git a/slides/k8s/helm.md b/slides/k8s/helm.md index 2df1edd1..809d62ef 100644 --- a/slides/k8s/helm.md +++ b/slides/k8s/helm.md @@ -34,27 +34,47 @@ ## Installing Helm -- We need to install the `helm` CLI; then use it to deploy `tiller` +- If the `helm` CLI is not installed in your environment, install it .exercise[ -- Install the `helm` CLI: +- Check if `helm` is installed: + ```bash + helm + ``` + +- If it's not installed, run the following command: ```bash curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash ``` -- Deploy `tiller`: +] + +--- + +## Installing Tiller + +- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace + +- They can be managed (installed, upgraded...) with the `helm` CLI + +.exercise[ + +- Deploy Tiller: ```bash helm init ``` -- Add the `helm` completion: - ```bash - . <(helm completion $(basename $SHELL)) - ``` - ] +If Tiller was already installed, don't worry: this won't break it. 
+ +At the end of the install process, you will see: + +``` +Happy Helming! +``` + --- ## Fix account permissions diff --git a/slides/k8s/kubectlproxy.md b/slides/k8s/kubectlproxy.md index 6ae9f4c5..883f6e50 100644 --- a/slides/k8s/kubectlproxy.md +++ b/slides/k8s/kubectlproxy.md @@ -1,4 +1,71 @@ -# Accessing internal services with `kubectl proxy` +# Accessing the API with `kubectl proxy` + +- The API requires us to authenticate.red[¹] + +- There are many authentication methods available, including: + + - TLS client certificates +
+ (that's what we've used so far) + + - HTTP basic password authentication +
+ (from a static file; not recommended) + + - various token mechanisms +
+ (detailed in the [documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#authentication-strategies)) + +.red[¹]OK, we lied. If you don't authenticate, you are considered to +be user `system:anonymous`, which doesn't have any access rights by default. + +--- + +## Accessing the API directly + +- Let's see what happens if we try to access the API directly with `curl` + +.exercise[ + +- Retrieve the ClusterIP allocated to the `kubernetes` service: + ```bash + kubectl get svc kubernetes + ``` + +- Replace the IP below and try to connect with `curl`: + ```bash + curl -k https://`10.96.0.1`/ + ``` + +] + +The API will tell us that user `system:anonymous` cannot access this path. + +--- + +## Authenticating to the API + +If we wanted to talk to the API, we would need to: + +- extract our TLS key and certificate information from `~/.kube/config` + + (the information is in PEM format, encoded in base64) + +- use that information to present our certificate when connecting + + (for instance, with `openssl s_client -key ... -cert ... -connect ...`) + +- figure out exactly which credentials to use + + (once we start juggling multiple clusters) + +- change that whole process if we're using another authentication method + +🤔 There has to be a better way! + +--- + +## Using `kubectl proxy` for authentication - `kubectl proxy` runs a proxy in the foreground @@ -10,15 +77,40 @@ - This is a great tool to learn and experiment with the Kubernetes API -- The Kubernetes API also gives us a proxy to HTTP and HTTPS services +- ... 
And for serious usages as well (suitable for one-shot scripts) -- Therefore, we can use `kubectl proxy` to access internal services - - (Without using a `NodePort` or similar service) +- For unattended use, it is better to create a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) --- -## Secure by default +## Trying `kubectl proxy` + +- Let's start `kubectl proxy` and then do a simple request with `curl`! + +.exercise[ + +- Start `kubectl proxy` in the background: + ```bash + kubectl proxy & + ``` + +- Access the API's default route: + ```bash + curl localhost:8001 + ``` + +- Terminate the proxy: + ```bash + kill %1 + ``` + +] + +The output is a list of available API routes. + +--- + +## `kubectl proxy` is intended for local use - By default, the proxy listens on port 8001 @@ -34,84 +126,54 @@ - This is great when running `kubectl proxy` locally -- Not-so-great when running it on a remote machine +- Not-so-great when you want to connect to the proxy from a remote machine --- ## Running `kubectl proxy` on a remote machine -- We are going to bind to `INADDR_ANY` instead of `127.0.0.1` +- If we wanted to connect to the proxy from another machine, we would need to: -- We are going to accept connections from any address + - bind to `INADDR_ANY` instead of `127.0.0.1` -.exercise[ + - accept connections from any address -- Run an open proxy to the Kubernetes API: +- This is achieved with: ``` kubectl proxy --port=8888 --address=0.0.0.0 --accept-hosts=.* ``` -] - -.warning[Anyone can now do whatever they want with our Kubernetes cluster! -
-(Don't do this on a real cluster!)] +.warning[Do not do this on a real cluster: it opens full unauthenticated access!] --- -## Viewing available API routes +## Security considerations -- The default route (i.e. `/`) shows a list of available API endpoints +- Running `kubectl proxy` openly is a huge security risk -.exercise[ +- It is slightly better to run the proxy where you need it -- Point your browser to the IP address of the node running `kubectl proxy`, port 8888 + (and copy credentials, e.g. `~/.kube/config`, to that place) -] - -The result should look like this: -```json -{ - "paths": [ - "/api", - "/api/v1", - "/apis", - "/apis/", - "/apis/admissionregistration.k8s.io", - … -``` +- It is even better to use a limited account with reduced permissions --- -## Connecting to a service through the proxy +## Good to know ... -- The API can proxy HTTP and HTTPS requests by accessing a special route: +- `kubectl proxy` also gives access to all internal services + +- Specifically, services are exposed as such: ``` - /api/v1/namespaces/`name_of_namespace`/services/`name_of_service`/proxy + /api/v1/namespaces//services//proxy ``` -- Since we now have access to the API, we can use this special route +- We can use `kubectl proxy` to access an internal service in a pinch -.exercise[ + (or, for non HTTP services, `kubectl port-forward`) -- Access the `hasher` service through the special proxy route: - ```open - http://`X.X.X.X`:8888/api/v1/namespaces/default/services/hasher/proxy - ``` +- This is not very useful when running `kubectl` directly on the cluster -] - -You should see the banner of the hasher service: `HASHER running on ...` - ---- - -## Stopping the proxy - -- Remember: as it is running right now, `kubectl proxy` gives open access to our cluster - -.exercise[ - -- Stop the `kubectl proxy` process with Ctrl-C - -] + (since we could connect to the services directly anyway) +- But it is very powerful as soon as you run `kubectl` from a remote machine diff --git 
a/slides/k8s/logs-centralized.md b/slides/k8s/logs-centralized.md index a707e0b9..6cdddda8 100644 --- a/slides/k8s/logs-centralized.md +++ b/slides/k8s/logs-centralized.md @@ -40,12 +40,12 @@ - Load the YAML file into our cluster: ```bash - kubectl apply -f https://goo.gl/MUZhE4 + kubectl apply -f ~/container.training/k8s/efk.yaml ``` ] -If we [look at the YAML file](https://goo.gl/MUZhE4), we see that +If we [look at the YAML file](https://github.com/jpetazzo/container.training/blob/master/k8s/efk.yaml), we see that it creates a daemon set, two deployments, two services, and a few roles and role bindings (to give fluentd the required permissions). diff --git a/slides/k8s/logs-cli.md b/slides/k8s/logs-cli.md index 5e23e3a1..1a91f6f5 100644 --- a/slides/k8s/logs-cli.md +++ b/slides/k8s/logs-cli.md @@ -47,22 +47,24 @@ Exactly what we need! ## Installing Stern -- For simplicity, let's just grab a binary release +- Run `stern` (without arguments) to check if it's installed: -.exercise[ + ``` + $ stern + Tail multiple pods and containers from Kubernetes -- Download a binary release from GitHub: - ```bash - sudo curl -L -o /usr/local/bin/stern \ - https://github.com/wercker/stern/releases/download/1.6.0/stern_linux_amd64 - sudo chmod +x /usr/local/bin/stern + Usage: + stern pod-query [flags] ``` -] +- If it is not installed, the easiest method is to download a [binary release](https://github.com/wercker/stern/releases) -These installation instructions will work on our clusters, since they are Linux amd64 VMs. - -However, you will have to adapt them if you want to install Stern on your local machine. 
+- The following commands will install Stern on a Linux Intel 64 bits machine: + ```bash + sudo curl -L -o /usr/local/bin/stern \ + https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64 + sudo chmod +x /usr/local/bin/stern + ``` --- diff --git a/slides/k8s/netpol.md b/slides/k8s/netpol.md new file mode 100644 index 00000000..dcd76d96 --- /dev/null +++ b/slides/k8s/netpol.md @@ -0,0 +1,268 @@ +# Network policies + +- Namespaces help us to *organize* resources + +- Namespaces do not provide isolation + +- By default, every pod can contact every other pod + +- By default, every service accepts traffic from anyone + +- If we want this to be different, we need *network policies* + +--- + +## What's a network policy? + +A network policy is defined by the following things. + +- A *pod selector* indicating which pods it applies to + + e.g.: "all pods in namespace `blue` with the label `zone=internal`" + +- A list of *ingress rules* indicating which inbound traffic is allowed + + e.g.: "TCP connections to ports 8000 and 8080 coming from pods with label `zone=dmz`, + and from the external subnet 4.42.6.0/24, except 4.42.6.5" + +- A list of *egress rules* indicating which outbound traffic is allowed + +A network policy can provide ingress rules, egress rules, or both. + +--- + +## How do network policies apply? + +- A pod can be "selected" by any number of network policies + +- If a pod isn't selected by any network policy, then its traffic is unrestricted + + (In other words: in the absence of network policies, all traffic is allowed) + +- If a pod is selected by at least one network policy, then all traffic is blocked ... + + ... 
unless it is explicitly allowed by one of these network policies + +--- + +class: extra-details + +## Traffic filtering is flow-oriented + +- Network policies deal with *connections*, not individual packets + +- Example: to allow HTTP (80/tcp) connections to pod A, you only need an ingress rule + + (You do not need a matching egress rule to allow response traffic to go through) + +- This also applies for UDP traffic + + (Allowing DNS traffic can be done with a single rule) + +- Network policy implementations use stateful connection tracking + +--- + +## Pod-to-pod traffic + +- Connections from pod A to pod B have to be allowed by both pods: + + - pod A has to be unrestricted, or allow the connection as an *egress* rule + + - pod B has to be unrestricted, or allow the connection as an *ingress* rule + +- As a consequence: if a network policy restricts traffic going from/to a pod, +
+ the restriction cannot be overridden by a network policy selecting another pod + +- This prevents an entity managing network policies in namespace A + (but without permission to do so in namespace B) + from adding network policies giving them access to namespace B + +--- + +## The rationale for network policies + +- In network security, it is generally considered better to "deny all, then allow selectively" + + (The other approach, "allow all, then block selectively" makes it too easy to leave holes) + +- As soon as one network policy selects a pod, the pod enters this "deny all" logic + +- Further network policies can open additional access + +- Good network policies should be scoped as precisely as possible + +- In particular: make sure that the selector is not too broad + + (Otherwise, you end up affecting pods that were otherwise well secured) + +--- + +## Our first network policy + +This is our game plan: + +- run a web server in a pod + +- create a network policy to block all access to the web server + +- create another network policy to allow access only from specific pods + +--- + +## Running our test web server + +.exercise[ + +- Let's use the `nginx` image: + ```bash + kubectl run testweb --image=nginx + ``` + +- Find out the IP address of the pod with one of these two commands: + ```bash + kubectl get pods -o wide -l run=testweb + IP=$(kubectl get pods -l run=testweb -o json | jq -r .items[0].status.podIP) + ``` + +- Check that we can connect to the server: + ```bash + curl $IP + ``` +] + +The `curl` command should show us the "Welcome to nginx!" page. 
+ +--- + +## Adding a very restrictive network policy + +- The policy will select pods with the label `run=testweb` + +- It will specify an empty list of ingress rules (matching nothing) + +.exercise[ + +- Apply the policy in this YAML file: + ```bash + kubectl apply -f ~/container.training/k8s/netpol-deny-all-for-testweb.yaml + ``` + +- Check if we can still access the server: + ```bash + curl $IP + ``` + +] + +The `curl` command should now time out. + +--- + +## Looking at the network policy + +This is the file that we applied: + +```yaml +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: deny-all-for-testweb +spec: + podSelector: + matchLabels: + run: testweb + ingress: [] +``` + +--- + +## Allowing connections only from specific pods + +- We want to allow traffic from pods with the label `run=testcurl` + +- Reminder: this label is automatically applied when we do `kubectl run testcurl ...` + +.exercise[ + +- Apply another policy: + ```bash + kubectl apply -f ~/container.training/k8s/netpol-allow-testcurl-for-testweb.yaml + ``` + +] + +--- + +## Looking at the network policy + +This is the second file that we applied: + +```yaml +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-testcurl-for-testweb +spec: + podSelector: + matchLabels: + run: testweb + ingress: + - from: + - podSelector: + matchLabels: + run: testcurl +``` + +--- + +## Testing the network policy + +- Let's create pods with, and without, the required label + +.exercise[ + +- Try to connect to testweb from a pod with the `run=testcurl` label: + ```bash + kubectl run testcurl --rm -i --image=centos -- curl -m3 $IP + ``` + +- Try to connect to testweb with a different label: + ```bash + kubectl run testkurl --rm -i --image=centos -- curl -m3 $IP + ``` + +] + +The first command will work (and show the "Welcome to nginx!" page). + +The second command will fail and time out after 3 seconds. + +(The timeout is obtained with the `-m3` option.)
+ +--- + +## An important warning + +- Some network plugins only have partial support for network policies + +- For instance, Weave [doesn't support ipBlock (yet)](https://github.com/weaveworks/weave/issues/3168) + +- Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018) + +- Unsupported features might be silently ignored + + (Making you believe that you are secure, when you're not) + +--- + +## Further resources + +- As always, the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) is a good starting point + +- And two resources by [Ahmet Alp Balkan](https://ahmet.im/): + + - a [very good talk about network policies](https://www.youtube.com/watch?list=PLj6h78yzYM2P-3-xqvmWaZbbI1sW-ulZb&v=3gGpMmYeEO8) at KubeCon North America 2017 + + - a repository of [ready-to-use recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) for network policies diff --git a/slides/k8s/setup-k8s.md b/slides/k8s/setup-k8s.md index e1e637a7..2d0c5419 100644 --- a/slides/k8s/setup-k8s.md +++ b/slides/k8s/setup-k8s.md @@ -73,7 +73,10 @@ [kubespray](https://github.com/kubernetes-incubator/kubespray) - If you like Terraform: - [typhoon](https://github.com/poseidon/typhoon/) + [typhoon](https://github.com/poseidon/typhoon) + +- If you like Terraform and Puppet: + [tarmak](https://github.com/jetstack/tarmak) - You can also learn how to install every component manually, with the excellent tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) diff --git a/slides/kube-fullday.yml b/slides/kube-fullday.yml index 98b0803b..173668d8 100644 --- a/slides/kube-fullday.yml +++ b/slides/kube-fullday.yml @@ -38,10 +38,11 @@ chapters: - - k8s/kubectlscale.md - k8s/daemonset.md - k8s/rollout.md - #- k8s/logs-cli.md - #- k8s/logs-centralized.md - #- k8s/helm.md - #- k8s/namespaces.md + - k8s/logs-cli.md + - k8s/logs-centralized.md + - 
k8s/helm.md + - k8s/namespaces.md + - k8s/netpol.md - k8s/whatsnext.md - k8s/links.md - shared/thankyou.md diff --git a/slides/kube-halfday.yml b/slides/kube-halfday.yml index 01b9dd86..be3a12a5 100644 --- a/slides/kube-halfday.yml +++ b/slides/kube-halfday.yml @@ -45,6 +45,7 @@ chapters: #- k8s/logs-centralized.md - k8s/helm.md - k8s/namespaces.md + #- k8s/netpol.md - k8s/whatsnext.md # - k8s/links.md # Bridget-specific diff --git a/slides/kube-selfpaced.yml b/slides/kube-selfpaced.yml index 190af30a..00f7f97f 100644 --- a/slides/kube-selfpaced.yml +++ b/slides/kube-selfpaced.yml @@ -41,6 +41,7 @@ chapters: - k8s/logs-centralized.md - k8s/helm.md - k8s/namespaces.md + - k8s/netpol.md - k8s/whatsnext.md - k8s/links.md - shared/thankyou.md