Compare commits
2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 7f77af5b9c |  |
|  | 2d224ebe9c |  |
@@ -1,26 +0,0 @@
{
    "name": "container.training environment to get started with Docker and/or Kubernetes",
    "image": "ghcr.io/jpetazzo/shpod",
    "features": {
        //"ghcr.io/devcontainers/features/common-utils:2": {}
    },

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    "forwardPorts": [],

    //"postCreateCommand": "... install extra packages...",
    "postStartCommand": "dind.sh ; kind.sh",

    // This lets us use "docker-outside-docker".
    // Unfortunately, minikube, kind, etc. don't work very well that way;
    // so for now, we'll likely use "docker-in-docker" instead (with a
    // privileged container). But we're still exposing that socket in case
    // someone wants to do something interesting with it.
    "mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],

    // This is for docker-in-docker.
    "privileged": true,

    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    "remoteUser": "k8s"
}
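
A hedged sketch of what "something interesting" with that socket could look like from inside the container (this assumes a docker CLI is installed there; the config above doesn't prescribe it):

```
# Talk to the host's Docker engine via the bind-mounted socket:
docker -H unix:///var/run/docker-host.sock ps
```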
37 .gitignore vendored
@@ -1,35 +1,8 @@
*.pyc
*.swp
*~

**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www

slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db

prepare-vms/ips.txt
prepare-vms/ips.html
prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
24 CHECKLIST.md
@@ -1,24 +0,0 @@
Checklist to use when delivering a workshop

Authored by Jérôme; additions by Bridget

- [ ] Create event-named branch (such as `conferenceYYYY`) in the [main repo](https://github.com/jpetazzo/container.training/)
- [ ] Create file `slides/_redirects` containing a link to the desired tutorial: `/ /kube-halfday.yml.html 200`
- [ ] Push local branch to GitHub and merge into main repo
- [ ] [Netlify setup](https://app.netlify.com/sites/container-training/settings/domain): create subdomain for event-named branch
- [ ] Add link to event-named branch to [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] Update the slide that says which versions we are using for [kube](https://github.com/jpetazzo/container.training/blob/master/slides/kube/versions-k8s.md) or [swarm](https://github.com/jpetazzo/container.training/blob/master/slides/swarm/versions.md) workshops
- [ ] Update the version of Compose and Machine in [settings](https://github.com/jpetazzo/container.training/tree/master/prepare-vms/settings)
- [ ] (optional) Create chatroom
- [ ] (optional) Set chatroom in YML ([kube half-day example](https://github.com/jpetazzo/container.training/blob/master/slides/kube-halfday.yml#L6-L8)) and deploy
- [ ] (optional) Put chat link on [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] How many VMs do we need? Check with event organizers ahead of time
- [ ] Provision VMs (slightly more than we think we'll need)
- [ ] Change password on presenter's VMs (to forestall any hijinks)
- [ ] Onsite: walk the room to count seats, check power supplies, lectern, A/V setup
- [ ] Print cards
- [ ] Cut cards
- [ ] Last-minute merge from master
- [ ] Check that all looks good
- [ ] DELIVER!
- [ ] Shut down VMs
- [ ] Update index.html to remove chat link and move session to past things
19 LICENSE
@@ -1,12 +1,13 @@
The code in this repository is licensed under the Apache License
Version 2.0. You may obtain a copy of this license at:
Copyright 2015 Jérôme Petazzoni

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

The instructions and slides in this repository (e.g. the files
with extension .md and .yml in the "slides" subdirectory) are
under the Creative Commons Attribution 4.0 International Public
License. You may obtain a copy of this license at:

    https://creativecommons.org/licenses/by/4.0/legalcode

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
297 README.md
@@ -1,134 +1,137 @@
# Container Training
# Docker Orchestration Workshop

This repository (formerly known as `orchestration-workshop`)
contains materials (slides, scripts, demo app, and other
code samples) used for various workshops, tutorials, and
training sessions around the themes of Docker, containers,
and orchestration.

For the moment, it includes:

- Introduction to Docker and Containers,
- Container Orchestration with Docker Swarm,
- Container Orchestration with Kubernetes.

These materials have been designed around the following
principles:

- they assume very little prior knowledge of Docker,
  containers, or a particular programming language;
- they can be used in a classroom setup (with an
  instructor), or self-paced at home;
- they are hands-on, meaning that they contain lots
  of examples and exercises that you can easily
  reproduce;
- they progressively introduce concepts in chapters
  that build on top of each other.

If you're looking for the materials, you can stop reading
right now, and hop to http://container.training/, which
hosts all the slide decks available.

The rest of this document explains how this repository
is structured, and how to use it to deliver (or create)
your own tutorials.
This is the material (slides, scripts, demo app, and other
code samples) for the "Docker orchestration workshop"
written and delivered by Jérôme Petazzoni (and lots of others)
non-stop since June 2015.


## Why a single repository?
## Content

All these materials have been gathered in a single repository
because they have a few things in common:

- some [shared slides](slides/shared/) that are re-used
  (and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
  Markdown source files;
- a [semi-automated test harness](slides/autopilot/) to check
  that the exercises and examples provided work properly;
- a [PhantomJS script](slides/slidechecker.js) to check
  that the slides look good and don't have formatting issues;
- [deployment scripts](prepare-vms/) to start training
  VMs in bulk;
- a fancy pipeline powered by
  [Netlify](https://www.netlify.com/) and continuously
  deploying `master` to http://container.training/.
- Chapter 1: Getting Started: running apps with docker-compose
- Chapter 2: Scaling out with Swarm Mode
- Chapter 3: Operating the Swarm (networks, updates, logging, metrics)
- Chapter 4: Deeper in Swarm (stateful services, scripting, DAB's)


## What are the different courses available?
## Quick start (or, "I want to try it!")

**Introduction to Docker** is derived from the first
"Docker Fundamentals" training materials. For more information,
see [jpetazzo/intro-to-docker](https://github.com/jpetazzo/intro-to-docker).
The version in this repository has been adapted to the Markdown
publishing pipeline. It is still maintained, but only receives
minor updates once in a while.
This workshop is designed to be *hands-on*, i.e. to give you a step-by-step
guide where you will build your own Docker cluster, and use it to deploy
a sample application.

**Container Orchestration with Docker Swarm** (formerly
known as "Orchestration Workshop") is a workshop created by Jérôme
Petazzoni in June 2015. Since then, it has been continuously updated
and improved, and has received contributions from many other authors.
It is actively maintained.
The easiest way to follow the workshop is to attend it when it is delivered
by an instructor. In that case, the instructor will generally give you
credentials (IP addresses, login, password) to connect to your own cluster
of virtual machines; and the [slides](http://jpetazzo.github.io/orchestration-workshop)
assume that you have your own cluster.

**Container Orchestration with Kubernetes** was created by
Jérôme Petazzoni in October 2017, with help and feedback from
a few other contributors. It is actively maintained.
If you want to follow the workshop on your own, and want to have your
own cluster, we have multiple solutions for you!


## Repository structure
### Using [play-with-docker](http://play-with-docker.com/)

- [bin](bin/)
  - A few helper scripts that you can safely ignore for now.
- [dockercoins](dockercoins/)
  - The demo app used throughout the orchestration workshops.
- [efk](efk/), [elk](elk/), [prom](prom/), [snap](snap/):
  - Logging and metrics stacks used in the later parts of
    the orchestration workshops.
- [prepare-local](prepare-local/), [prepare-machine](prepare-machine/):
  - Contributed scripts to automate the creation of local environments.
    These could use some help to test/check that they work.
- [prepare-vms](prepare-vms/):
  - Scripts to automate the creation of AWS instances for students.
    These are routinely used and actively maintained.
- [slides](slides/):
  - All the slides! They are assembled from Markdown files with
    a custom Python script, and then rendered using
    [gnab/remark](https://github.com/gnab/remark). Check this directory
    for more details. (A sketch of that assembly step follows this list.)
- [stacks](stacks/):
  - A handful of Compose files (version 3) allowing you to easily
    deploy complex application stacks.
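
As a rough sketch of that assembly step (`build.py` is a hypothetical name; the actual script and manifest format live in `slides/` and are not shown in this diff):

```
# The YAML manifest lists the Markdown chapters to concatenate; the
# output name matches the slides/*.yml.html pattern seen in .gitignore:
python slides/build.py slides/kube-halfday.yml > slides/kube-halfday.yml.html
```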
This method makes it very easy to get started (you don't need any extra account
or resources!) but will require a bit of adaptation from the workshop slides.

To get started, go to [play-with-docker](http://play-with-docker.com/), and
click on _ADD NEW INSTANCE_ five times. You will get five "docker-in-docker"
containers, all on a private network. These are your five nodes for the workshop!

When the instructions in the slides tell you to "SSH on node X", just go to
the tab corresponding to that node.

The nodes are not directly reachable from outside; so when the slides tell
you to "connect to the IP address of your node on port XYZ" you will have
to use a different method.

We suggest using "supergrok", a container offering an NGINX+ngrok combo to
expose your services. To use it, just start (on any of your nodes) the
`jpetazzo/supergrok` image. The image will output further instructions:

```
docker run --name supergrok -d jpetazzo/supergrok
docker logs --follow supergrok
```

The logs of the container will give you a tunnel address and explain how
to connect to exposed services. That's all you need to do!

We are also working on a native proxy, embedded in Play-With-Docker.
Stay tuned!

<!--

- You can use a proxy provided by Play-With-Docker. When the slides
  instruct you to connect to nodeX on port ABC, instead, you will connect
  to http://play-with-docker.com/XXX.XXX.XXX.XXX:ABC, where XXX.XXX.XXX.XXX
  is the IP address of nodeX.

-->

Note that the instances provided by Play-With-Docker have a short lifespan
(a few hours only), so if you want to do the workshop over multiple sessions,
you will have to start over each time... or create your own cluster with
one of the methods described below.


## Course structure
### Using Docker Machine to create your own cluster

(This applies only to the orchestration workshops.)
This method requires a bit more work to get started, but you get a permanent
cluster, with fewer limitations.

The workshop introduces a demo app, "DockerCoins," built
around a micro-services architecture. First, we run it
on a single node, using Docker Compose. Then, we pretend
that we need to scale it, and we use an orchestrator
(SwarmKit or Kubernetes) to deploy and scale the app on
a cluster.
You will need Docker Machine (if you have Docker for Mac, Docker for Windows, or
the Docker Toolbox, you're all set already). You will also need:

We explain the concepts of the orchestrator. For SwarmKit,
we set up the cluster with `docker swarm init` and `docker swarm join`
(see the sketch after the list below).
For Kubernetes, we use pre-configured clusters.
- credentials for a cloud provider (e.g. API keys or tokens),
- or a local install of VirtualBox or VMware (or anything supported
  by Docker Machine).
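
For reference, a minimal sketch of those two SwarmKit commands (the address and token are placeholders, not values from this repository):

```
# On the first node (it becomes a manager):
docker swarm init --advertise-addr <node1-ip>
# It prints a "docker swarm join" command with a token;
# run that command on each of the other nodes:
docker swarm join --token <token> <node1-ip>:2377
```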

Then, we cover more advanced concepts: scaling, load balancing,
updates, global services or daemon sets.

There are a number of advanced optional chapters about
logging, metrics, secrets, network encryption, etc.

The content is very modular: it is broken down into a large
number of Markdown files, that are put together according
to a YAML manifest. This makes it very easy to re-use content
between different workshops.
Full instructions are in the [prepare-machine](prepare-machine) subdirectory.
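
For a rough idea of what that looks like, here is a minimal sketch assuming the VirtualBox driver (the authoritative steps are in `prepare-machine`):

```
# Create five local VMs named node1..node5:
for N in 1 2 3 4 5; do
  docker-machine create --driver virtualbox node$N
done
# Point the docker CLI at the first node:
eval $(docker-machine env node1)
```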


### DockerCoins
### Using our scripts to mass-create a bunch of clusters

The sample app is in the `dockercoins` directory.
It's used during all chapters
Since we often deliver the workshop during conferences or similar events,
we have scripts to automate the creation of a bunch of clusters using
AWS EC2. If you want to create multiple clusters and have EC2 credits,
check the [prepare-vms](prepare-vms) directory for more information.


## How This Repo is Organized

- **dockercoins**
  - Sample App: compose files and source code for the dockercoins sample apps
    used throughout the workshop
- **docs**
  - Slide Deck: presentation slide deck, works out-of-the-box with GitHub Pages,
    uses https://remarkjs.com
- **prepare-local**
  - untested scripts for automating the creation of local VirtualBox VMs
    (could use your help validating)
- **prepare-machine**
  - instructions explaining how to use Docker Machine to create VMs
- **prepare-vms**
  - scripts for automating the creation of AWS instances for students


## Slide Deck

- The slides are in the `docs` directory.
- To view them locally, open `docs/index.html` in your browser. It works
  offline too.
- To view them online, open https://jpetazzo.github.io/orchestration-workshop/
  in your browser.
- When you fork this repo, be sure GitHub Pages is enabled in repo Settings
  for "master branch /docs folder" and you'll have your own website for them.
- They use https://remarkjs.com to allow simple markdown in an HTML file that
  remark will transform into a presentation in the browser.


## Sample App: Dockercoins!

The sample app is in the `dockercoins` directory. It's used during all chapters
for explaining different concepts of orchestration.

To see it in action:

@@ -138,18 +141,13 @@ To see it in action:
- the web UI will be available on port 8000
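
The exact commands sit in the part of the README that this diff elides; as a plausible sketch of the usual Compose flow (not the elided text itself):

```
cd dockercoins
docker-compose up -d    # build and start all four services
# then browse to port 8000 on your node, per the line above
```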

*If you just want to run the workshop for yourself, you can stop reading
here. If you want to deliver the workshop for others (i.e. if you
want to become an instructor), keep reading!*


## Running the Workshop

If you want to deliver one of these workshops yourself,
this section is for you!

> *This section has been mostly contributed by
> [Bret Fisher](https://twitter.com/bretfisher), who was
> one of the first people brave enough to deliver
> this workshop without me. Thanks Bret! 🍻
>
> Jérôme.*


### General timeline of planning a workshop

@@ -157,7 +155,7 @@ this section is for you!
  understand the different `dockercoins` repos and the steps we go through to
  get to a full Swarm Mode cluster of many containers. You'll update the first
  few slides and last slide at a minimum, with your info.
- ~~Your docs directory can use GitHub Pages.~~
- Your docs directory can use GitHub Pages.
- This workshop expects 5 servers per student. You can get away with as few
  as 2 servers per student, but you'll need to change the slide deck to
  accommodate. More servers = more fun.
@@ -183,14 +181,13 @@ this section is for you!
  they need for class.
- Typically you create the servers the day before or the morning of the workshop,
  and leave them up the rest of the day after the workshop. If creating hundreds
  of servers,
  you'll likely want to run all these `workshopctl` commands from a dedicated
  you'll likely want to run all these `trainer` commands from a dedicated
  instance you have in the same region as the instances you want to create. Much
  faster this way if you're on poor internet. Also, create 2 sets of servers for
  yourself, and use one during the workshop; the 2nd is a backup.
- Remember you'll need to print the "cards" for students, so you'll need to
  create instances while you have a way to print them.


### Things That Could Go Wrong

- Creating AWS instances ahead of time, and you hit its limits in region and
@@ -199,20 +196,18 @@ this section is for you!
  locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
  can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
  prevent you from losing your place if you get disconnected from servers.
  prevent you from loosing your place if you get disconnected from servers.
  https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IPs.
- Forget to have fun and focus on your students!


### Creating the VMs

`prepare-vms/workshopctl` is the script that gets you most of what you need for
`prepare-vms/trainer` is the script that gets you most of what you need for
setting up instances. See
[prepare-vms/README.md](prepare-vms)
for all the info on tools and scripts.


### Content for Different Workshop Durations

With all the slides, this workshop is a full day long. If you need to deliver
@@ -221,7 +216,6 @@ can replace `---` with `???` which will hide slides. Or leave them there and
add something like `(EXTRA CREDIT)` to the title so students can still view the
content but you also know to skip during presentation.


#### 3 Hour Version

- Limit time on debug tools, maybe skip a few. *"Chapter 1:
@@ -233,7 +227,6 @@ content but you also know to skip during presentation.
- Mention what DAB's are, but make this part optional in case you run out
  of time


#### 2 Hour Version

- Skip all the above, and:
@@ -247,17 +240,6 @@ content but you also know to skip during presentation.
- Last 15-30 minutes is for stateful services, DAB files, and questions.


### Pre-built images

There are pre-built images for the 4 components of the DockerCoins demo app: `dockercoins/hasher:v0.1`, `dockercoins/rng:v0.1`, `dockercoins/webui:v0.1`, and `dockercoins/worker:v0.1`. They correspond to the code in this repository.

There are also three variants, for demo purposes:

- `dockercoins/rng:v0.2` is broken (the server won't even start),
- `dockercoins/webui:v0.2` has a bigger font on the Y axis and a green graph (instead of blue),
- `dockercoins/worker:v0.2` is 11x slower than `v0.1`.
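
To grab the variants ahead of a demo, for instance (a sketch using the image names listed above):

```
docker pull dockercoins/rng:v0.2      # broken on purpose
docker pull dockercoins/webui:v0.2    # green graph, bigger Y-axis font
docker pull dockercoins/worker:v0.2   # 11x slower than v0.1
```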


## Past events

Since its inception, this workshop has been delivered dozens of times,
@@ -289,34 +271,13 @@ If there is a bug and you can't fix it, but you can
reproduce it: submit an issue explaining how to reproduce.

If there is a bug and you can't even reproduce it:
sorry. It is probably a Heisenbug. We can't act on it
until it's reproducible, alas.
sorry. It is probably a Heisenbug. I can't act on it
until it's reproducible.

If you have attended this workshop and have feedback,
or if you want us to deliver that workshop at your
conference or for your company: contact me (jerome
at docker dot com).

# “Please teach us!”

If you have attended one of these workshops, and want
your team or organization to attend a similar one, you
can look at the list of upcoming events on
http://container.training/.

You are also welcome to reuse these materials to run
your own workshop, for your team or even at a meetup
or conference. In that case, you might enjoy watching
[Bridget Kromhout's talk at KubeCon 2018 Europe](
https://www.youtube.com/watch?v=mYsp_cGY2O0), explaining
precisely how to run such a workshop yourself.

Finally, you can also contact the following persons,
who are experienced speakers, are familiar with the
material, and are available to deliver these workshops
at your conference or for your company:

- jerome dot petazzoni at gmail dot com
- bret at bretfisher dot com

(If you are willing and able to deliver such workshops,
feel free to submit a PR to add your name to that list!)

**Thank you!**
Thank you!


191 autotest/autotest.py Executable file
@@ -0,0 +1,191 @@
#!/usr/bin/env python

import os
import re
import signal
import subprocess
import time


def print_snippet(snippet):
    print(78*'-')
    print(snippet)
    print(78*'-')


class Snippet(object):

    def __init__(self, slide, content):
        self.slide = slide
        self.content = content
        self.actions = []

    def __str__(self):
        return self.content


class Slide(object):

    current_slide = 0

    def __init__(self, content):
        Slide.current_slide += 1
        self.number = Slide.current_slide
        # Remove commented-out slides
        # (remark.js considers ??? to be the separator for speaker notes)
        content = re.split("\n\?\?\?\n", content)[0]
        self.content = content
        self.snippets = []
        exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
        for exercise in exercises:
            if "```" in exercise and "<br/>`" in exercise:
                print("! Exercise on slide {} has both ``` and <br/>` delimiters, skipping."
                      .format(self.number))
                print_snippet(exercise)
            elif "```" in exercise:
                for snippet in exercise.split("```")[1::2]:
                    self.snippets.append(Snippet(self, snippet))
            elif "<br/>`" in exercise:
                for snippet in re.findall("<br/>`(.*)`", exercise):
                    self.snippets.append(Snippet(self, snippet))
            else:
                print("  Exercise on slide {} has neither ``` nor <br/>` delimiters, skipping."
                      .format(self.number))

    def __str__(self):
        text = self.content
        for snippet in self.snippets:
            text = text.replace(snippet.content, ansi("7")(snippet.content))
        return text


def ansi(code):
    return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)


slides = []
with open("index.html") as f:
    content = f.read()
    for slide in re.split("\n---?\n", content):
        slides.append(Slide(slide))

is_editing_file = False
placeholders = {}
for slide in slides:
    for snippet in slide.snippets:
        content = snippet.content
        # Multi-line snippets should be ```highlightsyntax...
        # Single-line snippets will be interpreted as shell commands
        if '\n' in content:
            highlight, content = content.split('\n', 1)
        else:
            highlight = "bash"
        content = content.strip()
        # If the previous snippet was a file fragment, and the current
        # snippet is not YAML or EDIT, complain.
        if is_editing_file and highlight not in ["yaml", "edit"]:
            print("! On slide {}, previous snippet was YAML, so what do what do?"
                  .format(slide.number))
            print_snippet(content)
        is_editing_file = False
        if highlight == "yaml":
            is_editing_file = True
        elif highlight == "placeholder":
            for line in content.split('\n'):
                variable, value = line.split(' ', 1)
                placeholders[variable] = value
        elif highlight == "bash":
            for variable, value in placeholders.items():
                quoted = "`{}`".format(variable)
                if quoted in content:
                    content = content.replace(quoted, value)
                    del placeholders[variable]
            if '`' in content:
                print("! The following snippet on slide {} contains a backtick:"
                      .format(slide.number))
                print_snippet(content)
                continue
            print("_ "+content)
            snippet.actions.append((highlight, content))
        elif highlight == "edit":
            print(". "+content)
            snippet.actions.append((highlight, content))
        elif highlight == "meta":
            print("^ "+content)
            snippet.actions.append((highlight, content))
        else:
            print("! Unknown highlight {!r} on slide {}.".format(highlight, slide.number))
if placeholders:
    print("! Remaining placeholder values: {}".format(placeholders))

actions = sum([snippet.actions for snippet in sum([slide.snippets for slide in slides], [])], [])

# Strip ^{ ... ^} for now
def strip_curly_braces(actions, in_braces=False):
    if actions == []:
        return []
    elif actions[0] == ("meta", "^{"):
        return strip_curly_braces(actions[1:], True)
    elif actions[0] == ("meta", "^}"):
        return strip_curly_braces(actions[1:], False)
    elif in_braces:
        return strip_curly_braces(actions[1:], True)
    else:
        return [actions[0]] + strip_curly_braces(actions[1:], False)

actions = strip_curly_braces(actions)

background = []
cwd = os.path.expanduser("~")
env = {}
for current_action, next_action in zip(actions, actions[1:]+[("bash", "true")]):
    if current_action[0] == "meta":
        continue
    print(ansi(7)(">>> {}".format(current_action[1])))
    time.sleep(1)
    popen_options = dict(shell=True, cwd=cwd, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
    # The following hack allows us to capture the environment variables set by `docker-machine env`
    # FIXME: this doesn't handle `unset` for now
    if any([
        "eval $(docker-machine env" in current_action[1],
        "DOCKER_HOST" in current_action[1],
        "COMPOSE_FILE" in current_action[1],
    ]):
        popen_options["stdout"] = subprocess.PIPE
        # Actions are tuples, so build an updated tuple instead of mutating in place.
        current_action = (current_action[0], current_action[1] + "\nenv")
    proc = subprocess.Popen(current_action[1], **popen_options)
    proc.cmd = current_action[1]
    if next_action[0] == "meta":
        print(">>> {}".format(next_action[1]))
        time.sleep(3)
        if next_action[1] == "^C":
            os.killpg(proc.pid, signal.SIGINT)
            proc.wait()
        elif next_action[1] == "^Z":
            # Let the process run
            background.append(proc)
        elif next_action[1] == "^D":
            proc.communicate()
            proc.wait()
        else:
            print("! Unknown meta action {} after snippet:".format(next_action[1]))
            print_snippet(next_action[1])
        print(ansi(7)("<<< {}".format(current_action[1])))
    else:
        proc.wait()
        if "stdout" in popen_options:
            stdout, stderr = proc.communicate()
            for line in stdout.split('\n'):
                if line.startswith("DOCKER_"):
                    variable, value = line.split('=', 1)
                    env[variable] = value
                    print("=== {}={}".format(variable, value))
        print(ansi(7)("<<< {} >>> {}".format(proc.returncode, current_action[1])))
        if proc.returncode != 0:
            print("Got non-zero status code; aborting.")
            break
    if current_action[1].startswith("cd "):
        cwd = os.path.expanduser(current_action[1][3:])

for proc in background:
    print("Terminating background process:")
    print_snippet(proc.cmd)
    proc.terminate()
    proc.wait()
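
Given the `open("index.html")` call above and the symlink added just below, the script appears meant to be run like this (a sketch):

```
cd autotest
./autotest.py   # extracts every .exercise[] snippet from index.html and replays it
```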

1 autotest/index.html Symbolic link
@@ -0,0 +1 @@
../www/htdocs/index.html

42 bin/add-load-balancer-v1.py Executable file
@@ -0,0 +1,42 @@
#!/usr/bin/env python

import os
import sys
import yaml

# arg 1 = service name
# arg 2 = number of instances

service_name = sys.argv[1]
desired_instances = int(sys.argv[2])

compose_file = os.environ["COMPOSE_FILE"]
input_file, output_file = compose_file, compose_file

config = yaml.load(open(input_file))

# The ambassadors need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

port = str(ports[service_name])

command_line = port

depends_on = []

for n in range(1, 1+desired_instances):
    config["services"]["{}{}".format(service_name, n)] = config["services"][service_name]
    command_line += " {}{}:{}".format(service_name, n, port)
    depends_on.append("{}{}".format(service_name, n))

config["services"][service_name] = {
    "image": "jpetazzo/hamba",
    "command": command_line,
    "depends_on": depends_on,
}
if "networks" in config["services"]["{}1".format(service_name)]:
    config["services"][service_name]["networks"] = config["services"]["{}1".format(service_name)]["networks"]

yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)
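
A hedged usage sketch; the `ports.yml` content, service name, and port below are illustrative (the real file isn't shown in this diff):

```
cat > ports.yml <<EOF
rng: 80
EOF
export COMPOSE_FILE=docker-compose.yml
./add-load-balancer-v1.py rng 5   # five rng replicas behind one hamba LB
```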

87 bin/add-load-balancer-v2.py Executable file
@@ -0,0 +1,87 @@
#!/usr/bin/env python

import os
import sys
import yaml

def error(msg):
    print("ERROR: {}".format(msg))
    exit(1)

# arg 1 = service name

service_name = sys.argv[1]

compose_file = os.environ["COMPOSE_FILE"]
input_file, output_file = compose_file, compose_file

config = yaml.load(open(input_file))

version = config.get("version")
if version != "2":
    error("Unsupported $COMPOSE_FILE version: {!r}".format(version))

# The load balancers need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

port = str(ports[service_name])

if service_name not in config["services"]:
    error("service {} not found in $COMPOSE_FILE"
          .format(service_name))

lb_name = "{}-lb".format(service_name)
be_name = "{}-be".format(service_name)
wd_name = "{}-wd".format(service_name)

if lb_name in config["services"]:
    error("load balancer {} already exists in $COMPOSE_FILE"
          .format(lb_name))

if wd_name in config["services"]:
    error("dns watcher {} already exists in $COMPOSE_FILE"
          .format(wd_name))

service = config["services"][service_name]
if "networks" in service:
    error("service {} has custom networks"
          .format(service_name))

# Put the service on its own network.
service["networks"] = {service_name: {"aliases": [ be_name ] } }
# Put a label indicating which load balancer is responsible for this service.
if "labels" not in service:
    service["labels"] = {}
service["labels"]["loadbalancer"] = lb_name

# Add the load balancer.
config["services"][lb_name] = {
    "image": "jpetazzo/hamba",
    "command": "{} {} {}".format(port, be_name, port),
    "depends_on": [ service_name ],
    "networks": {
        "default": {
            "aliases": [ service_name ],
        },
        service_name: None,
    },
}

# Add the DNS watcher.
config["services"][wd_name] = {
    "image": "jpetazzo/watchdns",
    "command": "{} {} {}".format(port, be_name, port),
    "volumes_from": [ lb_name ],
    "networks": {
        service_name: None,
    },
}

if "networks" not in config:
    config["networks"] = {}
if service_name not in config["networks"]:
    config["networks"][service_name] = None

yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)

63 bin/build-tag-push.py Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/env python

from common import ComposeFile
import os
import subprocess
import time

registry = os.environ.get("DOCKER_REGISTRY")

if not registry:
    print("Please set the DOCKER_REGISTRY variable, e.g.:")
    print("export DOCKER_REGISTRY=jpetazzo # use the Docker Hub")
    print("export DOCKER_REGISTRY=localhost:5000 # use a local registry")
    exit(1)

# Get the name of the current directory.
project_name = os.path.basename(os.path.realpath("."))

# Version used to tag the generated Docker image, using the UNIX timestamp or the given version.
if "VERSION" not in os.environ:
    version = str(int(time.time()))
else:
    version = os.environ["VERSION"]

# Execute "docker-compose build" and abort if it fails.
subprocess.check_call(["docker-compose", "-f", "docker-compose.yml", "build"])

# Load the services from the input docker-compose.yml file.
# TODO: run parallel builds.
compose_file = ComposeFile("docker-compose.yml")

# Iterate over all services that have a "build" definition.
# Tag them, and initiate a push in the background.
push_operations = dict()
for service_name, service in compose_file.services.items():
    if "build" in service:
        compose_image = "{}_{}".format(project_name, service_name)
        registry_image = "{}/{}:{}".format(registry, compose_image, version)
        # Re-tag the image so that it can be uploaded to the registry.
        subprocess.check_call(["docker", "tag", compose_image, registry_image])
        # Spawn "docker push" to upload the image.
        push_operations[service_name] = subprocess.Popen(["docker", "push", registry_image])
        # Replace the "build" definition by an "image" definition,
        # using the name of the image on the registry.
        del service["build"]
        service["image"] = registry_image

# Wait for push operations to complete.
for service_name, popen_object in push_operations.items():
    print("Waiting for {} push to complete...".format(service_name))
    popen_object.wait()
    print("Done.")

# Write the new docker-compose.yml file.
if "COMPOSE_FILE" not in os.environ:
    os.environ["COMPOSE_FILE"] = "docker-compose.yml-{}".format(version)
    print("Writing to new Compose file:")
else:
    print("Writing to provided Compose file:")

print("COMPOSE_FILE={}".format(os.environ["COMPOSE_FILE"]))
compose_file.save()
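
A usage sketch mirroring the hints the script itself prints (the timestamp tag is illustrative):

```
export DOCKER_REGISTRY=localhost:5000
./build-tag-push.py
# The script prints the Compose file it wrote, e.g.:
#   COMPOSE_FILE=docker-compose.yml-1466554800
# Export that value so later docker-compose commands use the pushed images.
```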

76 bin/common.py Executable file
@@ -0,0 +1,76 @@
import os
import subprocess
import sys
import time
import yaml


def COMPOSE_FILE():
    if "COMPOSE_FILE" not in os.environ:
        print("The $COMPOSE_FILE environment variable is not set. Aborting.")
        exit(1)
    return os.environ["COMPOSE_FILE"]


class ComposeFile(object):

    def __init__(self, filename=None):
        if filename is None:
            filename = COMPOSE_FILE()
        if not os.path.isfile(filename):
            print("File {!r} does not exist. Aborting.".format(filename))
            exit(1)
        self.data = yaml.load(open(filename))

    @property
    def services(self):
        if self.data.get("version") == "2":
            return self.data["services"]
        else:
            return self.data

    def save(self, filename=None):
        if filename is None:
            filename = COMPOSE_FILE()
        with open(filename, "w") as f:
            yaml.safe_dump(self.data, f, default_flow_style=False)

# Executes a bunch of commands in parallel, but no more than N at a time.
# This makes it possible to execute a large number of tasks concurrently,
# without turning into a fork bomb.
# `parallelism` is the number of tasks to execute simultaneously.
# `commands` is a list of tasks to execute.
# Each task is itself a list, where the first element is a descriptive
# string, and the following elements are the arguments to pass to Popen.
def parallel_run(commands, parallelism):
    running = []
    # While stuff is running, or we have stuff to run...
    while commands or running:
        # While there is stuff to run, and room in the pipe...
        while commands and len(running)<parallelism:
            command = commands.pop(0)
            print("START {}".format(command[0]))
            popen = subprocess.Popen(
                command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            popen._desc = command[0]
            running.append(popen)
        must_sleep = True
        for popen in running:
            status = popen.poll()
            if status is not None:
                must_sleep = False
                running.remove(popen)
                if status==0:
                    print("OK {}".format(popen._desc))
                else:
                    print("ERROR {} [Exit status: {}]"
                          .format(popen._desc, status))
                    output = "\n" + popen.communicate()[0].strip()
                    output = output.replace("\n", "\n| ")
                    print(output)
            else:
                print("WAIT ({} running, {} more to run)"
                      .format(len(running), len(commands)))
        if must_sleep:
            time.sleep(1)

69 bin/configure-ambassadors.py Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/env python

from common import parallel_run
import os
import subprocess

project_name = os.path.basename(os.path.realpath("."))

# Get all services and backends in our compose application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "com.docker.compose.service" }} '
                '{{ .Ports }}',
])

# Build list of backends.
frontend_ports = dict()
backends = dict()
for container in containers_data.split('\n'):
    if not container:
        continue
    # TODO: support services with multiple ports!
    container_id, service_name, port = container.split(' ')
    if not port:
        continue
    backend, frontend = port.split("->")
    backend_addr, backend_port = backend.split(':')
    frontend_port, frontend_proto = frontend.split('/')
    # TODO: deal with udp (mostly skip it?)
    assert frontend_proto == "tcp"
    # TODO: check inconsistencies between port mappings
    frontend_ports[service_name] = frontend_port
    if service_name not in backends:
        backends[service_name] = []
    backends[service_name].append((backend_addr, backend_port))

# Get all existing ambassadors for this application.
ambassadors_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=ambassador.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "ambassador.service" }} '
                '{{ .Label "ambassador.bindaddr" }}',
])

# Update ambassadors.
operations = []
for ambassador in ambassadors_data.split('\n'):
    if not ambassador:
        continue
    ambassador_id, service_name, bind_address = ambassador.split()
    print("Updating configuration for {}/{} -> {}:{} -> {}"
          .format(service_name, ambassador_id,
                  bind_address, frontend_ports[service_name],
                  backends[service_name]))
    command = [
        ambassador_id,
        "docker", "run", "--rm", "--volumes-from", ambassador_id,
        "jpetazzo/hamba", "reconfigure",
        "{}:{}".format(bind_address, frontend_ports[service_name])
    ]
    for backend_addr, backend_port in backends[service_name]:
        command.extend([backend_addr, backend_port])
    operations.append(command)

# Execute all commands in parallel.
parallel_run(operations, 10)

71 bin/create-ambassadors.py Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env python

from common import ComposeFile, parallel_run
import os
import subprocess

config = ComposeFile()

project_name = os.path.basename(os.path.realpath("."))

# Get all services in our compose application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .ID }} {{ .Label "com.docker.compose.service" }}',
])

# Get all existing ambassadors for this application.
ambassadors_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=ambassador.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "ambassador.container" }} '
                '{{ .Label "ambassador.service" }}',
])

# Build a set of existing ambassadors.
ambassadors = dict()
for ambassador in ambassadors_data.split('\n'):
    if not ambassador:
        continue
    ambassador_id, container_id, linked_service = ambassador.split()
    ambassadors[container_id, linked_service] = ambassador_id

operations = []

# Start the missing ambassadors.
for container in containers_data.split('\n'):
    if not container:
        continue
    container_id, service_name = container.split()
    extra_hosts = config.services[service_name].get("extra_hosts", {})
    for linked_service, bind_address in extra_hosts.items():
        description = "Ambassador {}/{}/{}".format(
            service_name, container_id, linked_service)
        ambassador_id = ambassadors.pop((container_id, linked_service), None)
        if ambassador_id:
            print("{} already exists: {}".format(description, ambassador_id))
        else:
            print("{} not found, creating it.".format(description))
            operations.append([
                description,
                "docker", "run", "-d",
                "--net", "container:{}".format(container_id),
                "--label", "ambassador.project={}".format(project_name),
                "--label", "ambassador.container={}".format(container_id),
                "--label", "ambassador.service={}".format(linked_service),
                "--label", "ambassador.bindaddr={}".format(bind_address),
                "jpetazzo/hamba", "run"
            ])

# Destroy extraneous ambassadors.
for ambassador_id in ambassadors.values():
    print("{} is not useful anymore, destroying it.".format(ambassador_id))
    operations.append([
        "rm -f {}".format(ambassador_id),
        "docker", "rm", "-f", ambassador_id,
    ])

# Execute all commands in parallel.
parallel_run(operations, 10)
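
The two ambassador scripts above appear designed to run in sequence; a hedged sketch:

```
./create-ambassadors.py      # one hamba ambassador per extra_hosts entry
./configure-ambassadors.py   # re-point ambassadors at the current backends
```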

3 bin/delete-ambassadors.sh Executable file
@@ -0,0 +1,3 @@
#!/bin/sh
docker ps -q --filter label=ambassador.project=dockercoins |
    xargs docker rm -f

16 bin/fixup-yaml.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/sh
# Some tools will choke on the YAML files generated by PyYAML;
# in particular on a section like this one:
#
# service:
#   ports:
#   - 8000:5000
#
# This script adds two spaces in front of the dash in those files.
# Warning: it is a hack, and probably won't work on some YAML files.
[ -f "$COMPOSE_FILE" ] || {
    echo "Cannot find COMPOSE_FILE"
    exit 1
}
sed -i 's/^  -/    -/' $COMPOSE_FILE
38 bin/link-to-ambassadors.py Executable file
@@ -0,0 +1,38 @@
#!/usr/bin/env python

from common import ComposeFile
import yaml

config = ComposeFile()

# The ambassadors need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

def generate_local_addr():
    last_byte = 2
    while last_byte<255:
        yield "127.127.0.{}".format(last_byte)
        last_byte += 1

for service_name, service in config.services.items():
    if "links" in service:
        for link, local_addr in zip(service["links"], generate_local_addr()):
            if link not in ports:
                print("Skipping link {} in service {} "
                      "(no port mapping defined). "
                      "Your code will probably break."
                      .format(link, service_name))
                continue
            if "extra_hosts" not in service:
                service["extra_hosts"] = {}
            service["extra_hosts"][link] = local_addr
        del service["links"]
    if "ports" in service:
        del service["ports"]
    if "volumes" in service:
        del service["volumes"]
    if service_name in ports:
        service["ports"] = [ ports[service_name] ]

config.save()
46 bin/reconfigure-load-balancers.py Executable file
@@ -0,0 +1,46 @@
#!/usr/bin/env python

# FIXME: hardcoded
PORT="80"

import os
import subprocess

project_name = os.path.basename(os.path.realpath("."))

# Get all existing services for this application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .Label "com.docker.compose.service" }} '
                '{{ .Label "com.docker.compose.container-number" }} '
                '{{ .Label "loadbalancer" }}',
])

load_balancers = dict()
for line in containers_data.split('\n'):
    if not line:
        continue
    service_name, container_number, load_balancer = line.split(' ')
    if load_balancer:
        if load_balancer not in load_balancers:
            load_balancers[load_balancer] = []
        load_balancers[load_balancer].append((service_name, int(container_number)))

for load_balancer, backends in load_balancers.items():
    # FIXME: iterate on all load balancers
    container_name = "{}_{}_1".format(project_name, load_balancer)
    command = [
        "docker", "run", "--rm",
        "--volumes-from", container_name,
        "--net", "container:{}".format(container_name),
        "jpetazzo/hamba", "reconfigure", PORT,
    ]
    command.extend(
        "{}_{}_{}:{}".format(project_name, backend_name, backend_number, PORT)
        for (backend_name, backend_number) in sorted(backends)
    )
    print("Updating configuration for {} with {} backend(s)..."
          .format(container_name, len(backends)))
    subprocess.check_output(command)
201 bin/setup-all-the-things.sh Executable file
@@ -0,0 +1,201 @@
#!/bin/sh
unset DOCKER_REGISTRY
unset DOCKER_HOST
unset COMPOSE_FILE

SWARM_IMAGE=${SWARM_IMAGE:-swarm}

prepare_1_check_ssh_keys () {
    for N in $(seq 1 5); do
        ssh node$N true
    done
}

prepare_2_compile_swarm () {
    cd ~
    git clone git://github.com/docker/swarm
    cd swarm
    [[ -z "$1" ]] && {
        echo "Specify which revision to build."
        return
    }
    git checkout "$1" || return
    mkdir -p image
    docker build -t docker/swarm:$1 .
    docker run -i --entrypoint sh docker/swarm:$1 \
        -c 'cat $(which swarm)' > image/swarm
    chmod +x image/swarm
    cat >image/Dockerfile <<EOF
FROM scratch
COPY ./swarm /swarm
ENTRYPOINT ["/swarm", "-debug", "-experimental"]
EOF
    docker build -t jpetazzo/swarm:$1 image
    docker login
    docker push jpetazzo/swarm:$1
    docker logout
    SWARM_IMAGE=jpetazzo/swarm:$1
}

clean_1_containers () {
    for N in $(seq 1 5); do
        ssh node$N "docker ps -aq | xargs -r -n1 -P10 docker rm -f"
    done
}

clean_2_volumes () {
    for N in $(seq 1 5); do
        ssh node$N "docker volume ls -q | xargs -r docker volume rm"
    done
}

clean_3_images () {
    for N in $(seq 1 5); do
        ssh node$N "docker images | awk '/dockercoins|jpetazzo/ {print \$1\":\"\$2}' | xargs -r docker rmi -f"
    done
}

clean_4_machines () {
    rm -rf ~/.docker/machine/
}

clean_all () {
    clean_1_containers
    clean_2_volumes
    clean_3_images
    clean_4_machines
}

dm_swarm () {
    eval $(docker-machine env node1 --swarm)
}
dm_node1 () {
    eval $(docker-machine env node1)
}

setup_1_swarm () {
    grep node[12345] /etc/hosts | grep -v ^127 |
    while read IPADDR NODENAME; do
        docker-machine create --driver generic \
            --engine-opt cluster-store=consul://localhost:8500 \
            --engine-opt cluster-advertise=eth0:2376 \
            --swarm --swarm-master --swarm-image $SWARM_IMAGE \
            --swarm-discovery consul://localhost:8500 \
            --swarm-opt replication --swarm-opt advertise=$IPADDR:3376 \
            --generic-ssh-user docker --generic-ip-address $IPADDR $NODENAME
    done
}

setup_2_consul () {
    IPADDR=$(ssh node1 ip a ls dev eth0 |
             sed -n 's,.*inet \(.*\)/.*,\1,p')

    for N in 1 2 3 4 5; do
        ssh node$N -- docker run -d --restart=always --name consul_node$N \
            -e CONSUL_BIND_INTERFACE=eth0 --net host consul \
            agent -server -retry-join $IPADDR -bootstrap-expect 5 \
            -ui -client 0.0.0.0
    done
}

setup_3_wait () {
    # Wait for a Swarm master
    dm_swarm
    while ! docker ps; do sleep 1; done

    # Wait for all nodes to be there
    while ! [ "$(docker info | grep "^Nodes:")" = "Nodes: 5" ]; do sleep 1; done
}

setup_4_registry () {
    cd ~/orchestration-workshop/registry
    dm_swarm
    docker-compose up -d
    for N in $(seq 2 5); do
        docker-compose scale frontend=$N
    done
}

setup_5_btp_dockercoins () {
    cd ~/orchestration-workshop/dockercoins
    dm_node1
    export DOCKER_REGISTRY=localhost:5000
    cp docker-compose.yml-v2 docker-compose.yml
    ~/orchestration-workshop/bin/build-tag-push.py | tee /tmp/btp.log
    export $(tail -n 1 /tmp/btp.log)
}

setup_6_add_lbs () {
    cd ~/orchestration-workshop/dockercoins
    ~/orchestration-workshop/bin/add-load-balancer-v2.py rng
    ~/orchestration-workshop/bin/add-load-balancer-v2.py hasher
}

setup_7_consulfs () {
    dm_swarm
    docker pull jpetazzo/consulfs
    for N in $(seq 1 5); do
        ssh node$N "docker run --rm -v /usr/local/bin:/target jpetazzo/consulfs"
        ssh node$N mkdir -p ~/consul
        ssh -f node$N "mountpoint ~/consul || consulfs localhost:8500 ~/consul"
    done
}

setup_8_syncmachine () {
    while ! mountpoint ~/consul; do
        sleep 1
    done
    cp -r ~/.docker/machine ~/consul/
    for N in $(seq 2 5); do
        ssh node$N mkdir -p ~/.docker
        ssh node$N "[ -L ~/.docker/machine ] || ln -s ~/consul/machine ~/.docker"
    done
}

setup_9_elk () {
    dm_swarm
    cd ~/orchestration-workshop/elk
    docker-compose up -d
    for N in $(seq 1 5); do
        docker-compose scale logstash=$N
    done
}

setup_all () {
    setup_1_swarm
    setup_2_consul
    setup_3_wait
    setup_4_registry
    setup_5_btp_dockercoins
    setup_6_add_lbs
    setup_7_consulfs
    setup_8_syncmachine
    dm_swarm
}


force_remove_network () {
    dm_swarm
    NET="$1"
    for CNAME in $(docker network inspect $NET | grep Name | grep -v \"$NET\" | cut -d\" -f4); do
        echo $CNAME
        docker network disconnect -f $NET $CNAME
    done
    docker network rm $NET
}

demo_1_compose_up () {
    dm_swarm
    cd ~/orchestration-workshop/dockercoins
    docker-compose up -d
}

grep -qs -- MAGICMARKER "$0" && { # Don't display this line in the function list
    echo "You should source this file, then invoke the following functions:"
    grep -- '^[a-z].*{$' "$0" | cut -d" " -f1
}

show_swarm_primary () {
    dm_swarm
    docker info 2>/dev/null | grep -e ^Role -e ^Primary
}
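
As the self-documenting snippet above suggests, the file is meant to be sourced and its functions invoked individually; a sketch:

```
. bin/setup-all-the-things.sh
setup_all   # or run the prepare_*, clean_*, and setup_* steps one by one
```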

12 cadvisor/docker-compose.yml Normal file
@@ -0,0 +1,12 @@
version: "2"

services:
  cadvisor:
    image: google/cadvisor
    ports:
      - "8080:8080"
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:rw"
      - "/sys:/sys:ro"
      - "/var/lib/docker/:/var/lib/docker:ro"

19 ceph/README.md Normal file
@@ -0,0 +1,19 @@
# CEPH on Docker

Note: this doesn't quite work yet.

The OSD containers need to be started twice (the first time, they fail
initializing; the second time is a champ).

Also, it looks like you need at least two OSD containers (or the OSD
container should have two disks/directories, whatever).

RadosGW is listening on port 8080.

The `admin` container will create a `docker` user using `radosgw-admin`.
If you run it multiple times, that's OK: further invocations are idempotent.

Last but not least: it looks like the AWS CLI uses a new signature format
that doesn't work with RadosGW. After almost two hours trying to figure
out what was wrong, I tried the S3 credentials directly with boto and
it worked immediately (I was able to create a bucket).
|
||||
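For reference, here is a minimal sketch of that boto check. It assumes RadosGW is reachable on `localhost:8080`, and `ACCESS_KEY`/`SECRET_KEY` are placeholders for the credentials printed by the `admin` container:

```python
import boto
import boto.s3.connection

# Plain-HTTP S3 connection to RadosGW; path-style addressing
# (OrdinaryCallingFormat) avoids bucket-name DNS tricks.
conn = boto.connect_s3(
    aws_access_key_id="ACCESS_KEY",        # placeholder
    aws_secret_access_key="SECRET_KEY",    # placeholder
    host="localhost",
    port=8080,
    is_secure=False,
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)

bucket = conn.create_bucket("test-bucket")
print(bucket.name)
```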
53
ceph/docker-compose.yml
Normal file
@@ -0,0 +1,53 @@
version: "2"

services:
  mon:
    image: ceph/daemon
    command: mon
    environment:
      CEPH_PUBLIC_NETWORK: 10.33.0.0/16
      MON_IP: 10.33.0.2
  osd:
    image: ceph/daemon
    command: osd_directory
    depends_on:
      - mon
    volumes_from:
      - mon
    volumes:
      - /var/lib/ceph/osd
  mds:
    image: ceph/daemon
    command: mds
    environment:
      CEPHFS_CREATE: 1
    depends_on:
      - mon
    volumes_from:
      - mon
  rgw:
    image: ceph/daemon
    command: rgw
    depends_on:
      - mon
    volumes_from:
      - mon
    environment:
      CEPH_OPTS: --verbose
  admin:
    image: ceph/daemon
    entrypoint: radosgw-admin
    depends_on:
      - mon
    volumes_from:
      - mon
    command: user create --uid=docker --display-name=docker

networks:
  default:
    ipam:
      driver: default
      config:
        - subnet: 10.33.0.0/16
          gateway: 10.33.0.1
@@ -1,9 +0,0 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout
@@ -1,3 +0,0 @@
hostname frr
ip nht resolve-via-default
log stdout
@@ -1,40 +0,0 @@
version: "3"

services:
  bgpd:
    image: frrouting/frr:v8.2.2
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    cap_add:
      - NET_ADMIN
      - SYS_ADMIN
    entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
    restart: always

  zebra:
    image: frrouting/frr:v8.2.2
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    cap_add:
      - NET_ADMIN
      - SYS_ADMIN
    entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
    restart: always

  vtysh:
    image: frrouting/frr:v8.2.2
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: vtysh

  chmod:
    image: alpine
    volumes:
      - ./run:/var/run/frr
    command: chmod 777 /var/run/frr
@@ -1,29 +0,0 @@
version: "3"

services:

  pause:
    ports:
      - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.4.9
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
    "Edit the CLUSTER placeholder first. Then, remove this line.":

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    command: kube-scheduler --master http://localhost:8080
@@ -1,128 +0,0 @@
---
apiVersion: |+


  Make sure you update the line with --master=http://X.X.X.X:8080 below.
  Then remove this section from this YAML file and try again.

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
      "cniVersion":"0.3.0",
      "name":"mynet",
      "plugins":[
        {
          "name":"kubernetes",
          "type":"bridge",
          "bridge":"kube-bridge",
          "isDefaultGateway":true,
          "ipam":{
            "type":"host-local"
          }
        }
      ]
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
  name: kube-router
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-router
  template:
    metadata:
      labels:
        k8s-app: kube-router
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      containers:
      - name: kube-router
        image: docker.io/cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        - "--run-router=true"
        - "--run-firewall=true"
        - "--run-service-proxy=true"
        - "--master=http://X.X.X.X:8080"
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KUBE_ROUTER_CNI_CONF_FILE
          value: /etc/cni/net.d/10-kuberouter.conflist
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            if [ -f /etc/cni/net.d/*.conf ]; then
              rm -f /etc/cni/net.d/*.conf;
            fi;
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json ${TMP};
            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
          fi
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni-conf-dir
        - mountPath: /etc/kube-router
          name: kube-router-cfg
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
@@ -1,31 +0,0 @@
# Note: hyperkube isn't available after Kubernetes 1.18.
# So we'll have to update this for Kubernetes 1.19!

version: "3"

services:

  pause:
    ports:
      - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.4.9
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    command: kube-controller-manager --master http://localhost:8080

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    command: kube-scheduler --master http://localhost:8080
12
consul/docker-compose.yml
Normal file
@@ -0,0 +1,12 @@
version: "2"
services:
  bootstrap:
    image: jpetazzo/consul
    command: agent -server -bootstrap
    container_name: bootstrap
  server:
    image: jpetazzo/consul
    command: agent -server -join bootstrap -join server
  client:
    image: jpetazzo/consul
    command: members -rpc-addr server:8400
@@ -1,72 +0,0 @@
# (1) Setting up a registry, and telling Tilt to use it.

# Tilt needs a registry to store images.

# The following manifest defines a Deployment to run a basic Docker registry,
# and a NodePort Service to access it. Using a NodePort means that we don't
# need to obtain a TLS certificate, because we will be accessing the registry
# through localhost.
k8s_yaml('../k8s/tilt-registry.yaml')

# Tell Tilt to use the registry that we just deployed instead of whatever
# is defined in our Kubernetes resources. Tilt will patch image names to
# use our registry.
default_registry('localhost:30555')

# Create a port forward so that we can access the registry from our local
# environment, too. Note that if you run Tilt directly from a Kubernetes node
# (which is not typical, but might happen in some lab/training environments)
# the following might cause an error because port 30555 is already taken.
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')

# (2) Telling Tilt how to build and run our app.

# The following two lines will use the kubectl-build plugin
# to leverage buildkit and build the images in our Kubernetes
# cluster. This is not enabled by default, because it requires
# the plugin to be installed.
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
# for more information about this plugin.
#load('ext://kubectl_build', 'kubectl_build')
#docker_build = kubectl_build

# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
# how each of these images should be built. The first argument is the name
# of the image, the second argument is the directory containing the build
# context (i.e. the Dockerfile to build the image).
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')

# The following manifest defines five Deployments and four Services for
# our application.
k8s_yaml('../k8s/dockercoins.yaml')

# (3) Finishing touches.

# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')

# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires setting up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.

# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')

# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
#   TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
#   [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
@@ -1,24 +1,26 @@
services:
version: "2"

services:
  rng:
    build: rng
    ports:
      - "8001:80"
      - "8001:80"

  hasher:
    build: hasher
    ports:
      - "8002:80"
      - "8002:80"

  webui:
    build: webui
    ports:
      - "8000:80"
      - "8000:80"
    volumes:
      - "./webui/files/:/files/"
      - "./webui/files/:/files/"

  redis:
    image: redis

  worker:
    build: worker
@@ -1,8 +1,7 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN apk add --update build-base
RUN gem install sinatra
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'

set :bind, '0.0.0.0'
set :port, 80

post '/' do
@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run", "--without-threads"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80
@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
    app.run(port=80)
    app.run(host="0.0.0.0", port=80)
@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
FROM node:4-slim
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
RUN npm install redis
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80
@@ -13,7 +13,7 @@
    color: royalblue;
  }
</style>
<script src="jquery-1.11.3.min.js"></script>
<script src="jquery.js"></script>
<script src="d3.min.js"></script>
<script src="rickshaw.min.js"></script>
<script>
@@ -50,7 +50,7 @@ function refresh () {
    points.push({ x: s2.now, y: speed });
  }
  $("#speed").text("~" + speed.toFixed(1) + " hashes/second");
  var msg = ("I'm attending a @docker orchestration workshop, "
  var msg = ("I'm attending the @docker workshop at #LinuxCon, "
             + "and my #DockerCoins mining rig is crunching "
             + speed.toFixed(1) + " hashes/second! W00T!");
  $("#tweet").attr(
1
dockercoins/webui/files/jquery.js
vendored
Symbolic link
@@ -0,0 +1 @@
jquery-1.11.3.min.js
@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';

var client = await createClient({
  url: "redis://redis",
  socket: {
    family: 0
  }
})
.on("error", function (err) {
  console.error("Redis error", err);
})
.connect();

var express = require('express');
var app = express();
var redis = require('redis');

app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
  console.error("Redis error", err);
});

app.get('/', function (req, res) {
  res.redirect('/index.html');
});

app.get('/json', async(req, res) => {
  var coins = await client.hLen('wallet');
  var hashes = await client.get('hashes');
  var now = Date.now() / 1000;
  res.json({
    coins: coins,
    hashes: hashes,
    now: now
app.get('/json', function (req, res) {
  client.hlen('wallet', function (err, coins) {
    client.get('hashes', function (err, hashes) {
      var now = Date.now() / 1000;
      res.json( {
        coins: coins,
        hashes: hashes,
        now: now
      });
    });
  });
});
@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]
[10 binary image files changed; sizes unchanged (15 KiB, 15 KiB, 26 KiB, 680 KiB, 137 KiB, 252 KiB, 213 KiB, 901 KiB, 575 KiB, 205 KiB)]
19
docs/extract-section-titles.py
Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env python
"""
Extract and print level 1 and 2 titles from workshop slides.
"""

separators = [
    "---",
    "--"
]

slide_count = 1
for line in open("index.html"):
    line = line.strip()
    if line in separators:
        slide_count += 1
    if line.startswith('## '):
        # Level 2 title.
        print(slide_count, line)
    elif line.startswith('# '):
        # Level 1 title.
        print(slide_count, line)
[2 binary image files changed; sizes unchanged (147 KiB, 145 KiB)]
6109
docs/index.html
Normal file
[3 binary image files changed; sizes unchanged (24 KiB, 145 KiB, 59 KiB)]
18
docs/remark-0.14.min.js
vendored
Normal file
[5 binary image files changed; sizes unchanged (48 KiB, 266 KiB, 1.2 MiB, 15 KiB, 53 KiB)]
@@ -2,14 +2,14 @@ version: "2"
services:
  elasticsearch:
    image: elasticsearch:2
    image: elasticsearch
    # If you need to access ES directly, just uncomment those lines.
    #ports:
    #  - "9200:9200"
    #  - "9300:9300"

  logstash:
    image: logstash:2
    image: logstash
    command: |
      -e '
      input {
@@ -47,7 +47,7 @@ services:
        - "12201:12201/udp"

  kibana:
    image: kibana:4
    image: kibana
    ports:
      - "5601:5601"
    environment:
@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  use-forwarded-headers: "true"
  compute-full-forwarded-for: "true"
  use-proxy-protocol: "true"
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: ingress-nginx
@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
  target:
    kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
  target:
    kind: Service
@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  annotations:
    service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "true"
    service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: kyverno
@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: flux-multi-tenancy
spec:
  validationFailureAction: enforce
  rules:
  - name: serviceAccountName
    exclude:
      resources:
        namespaces:
        - flux-system
    match:
      resources:
        kinds:
        - Kustomization
        - HelmRelease
    validate:
      message: ".spec.serviceAccountName is required"
      pattern:
        spec:
          serviceAccountName: "?*"
  - name: kustomizationSourceRefNamespace
    exclude:
      resources:
        namespaces:
        - flux-system
        - ingress-nginx
        - kyverno
        - monitoring
        - openebs
    match:
      resources:
        kinds:
        - Kustomization
    preconditions:
      any:
      - key: "{{request.object.spec.sourceRef.namespace}}"
        operator: NotEquals
        value: ""
    validate:
      message: "spec.sourceRef.namespace must be the same as metadata.namespace"
      deny:
        conditions:
        - key: "{{request.object.spec.sourceRef.namespace}}"
          operator: NotEquals
          value: "{{request.object.metadata.namespace}}"
  - name: helmReleaseSourceRefNamespace
    exclude:
      resources:
        namespaces:
        - flux-system
        - ingress-nginx
        - kyverno
        - monitoring
        - openebs
    match:
      resources:
        kinds:
        - HelmRelease
    preconditions:
      any:
      - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
        operator: NotEquals
        value: ""
    validate:
      message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
      deny:
        conditions:
        - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
          operator: NotEquals
          value: "{{request.object.metadata.namespace}}"
@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: monitoring
spec:
  ingressClassName: nginx
  rules:
  - host: grafana.test.metal.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kube-prometheus-stack-grafana
            port:
              number: 80
@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector: {}
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      app: web
  ingress:
  - from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-db
spec:
  podSelector:
    matchLabels:
      app: db
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: web
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: openebs
@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
  files:
  - values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml
@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
  version: v1
  fieldSpecs:
  - path: spec/valuesFrom/name
    kind: HelmRelease
@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
#   --set engines.replicated.mayastor.enabled=false
#   --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
#   --create-namespace
engines:
  replicated:
    mayastor:
      enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
  lvmNode:
    kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
  hostpathClass:
    isDefaultClass: true
@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  namespace: rocky-test
  name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
  resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
  verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rocky-pv-access
rules:
- apiGroups: [""]
  resources: [persistentvolumes]
  verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    toolkit.fluxcd.io/tenant: rocky
  name: rocky-reconciler2
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: gotk:rocky-test:reconciler
- kind: ServiceAccount
  name: rocky
  namespace: rocky-test
@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rocky
  namespace: rocky-test
spec:
  ingressClassName: nginx
  rules:
  - host: rocky.test.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web
            port:
              number: 80
@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
  target:
    kind: Kustomization
@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  path: ./k8s/plain
@@ -1,8 +0,0 @@
k8s_yaml(helm(
    "./path-to-chart", name="blue",
    values=[], # Example: ["./path/to/values.yaml"]
    set=[
        "image.repository=jpetazzo/color",
        "image.tag=latest",
    ]
))
@@ -1,16 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    apiVersion: pod-security.admission.config.k8s.io/v1alpha1
    kind: PodSecurityConfiguration
    defaults:
      enforce: baseline
      audit: baseline
      warn: baseline
    exemptions:
      usernames:
      - cluster-admin
      namespaces:
      - kube-system
@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: blue
  name: blue
spec:
  replicas: 1
  selector:
    matchLabels:
      app: blue
  template:
    metadata:
      labels:
        app: blue
    spec:
      containers:
      - image: jpetazzo/color
        name: color
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: blue
  name: blue
spec:
  ports:
  - name: "80"
    port: 80
  selector:
    app: blue
@@ -1,21 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      whatever: 90%
      whatever-new: 10%
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 80
      - path: /
        backend:
          serviceName: whatever-new
          servicePort: 80
@@ -1,36 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: certbot
spec:
  ports:
  - port: 80
    protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: certbot
spec:
  rules:
  - http:
      paths:
      - path: /.well-known/acme-challenge/
        pathType: Prefix
        backend:
          service:
            name: certbot
            port:
              number: 80
---
apiVersion: v1
kind: Endpoints
metadata:
  name: certbot
subsets:
- addresses:
  - ip: A.B.C.D
  ports:
  - port: 8000
    protocol: TCP
@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: xyz.A.B.C.D.nip.io
spec:
  secretName: xyz.A.B.C.D.nip.io
  dnsNames:
  - xyz.A.B.C.D.nip.io
  issuerRef:
    name: letsencrypt-staging
    kind: ClusterIssuer
@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # Remember to update this if you use this manifest to obtain real certificates :)
    email: hello@example.com
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # To use the production environment, use the following line instead:
    #server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: issuer-letsencrypt-staging
    solvers:
    - http01:
        ingress:
          class: traefik
@@ -1,18 +0,0 @@
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
# for reference, but it's not intended to be used in modern trainings.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  version: v1alpha1
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
@@ -1,21 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  versions:
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
  versions:
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        required: [ spec ]
        properties:
          spec:
            type: object
            properties:
              taste:
                description: Subjective taste of that kind of coffee bean
                type: string
            required: [ taste ]
    additionalPrinterColumns:
    - jsonPath: .spec.taste
      description: Subjective taste of that kind of coffee bean
      name: Taste
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
@@ -1,34 +0,0 @@
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: arabica
spec:
  taste: strong
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: excelsa
spec:
  taste: fruity
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: liberica
spec:
  taste: smoky
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: robusta
spec:
  taste: stronger
  bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: java
@@ -1,78 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      containers:
      - name: consul
        image: "consul:1.11"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
@@ -1,89 +0,0 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - the cluster works when scaling down to 1 (thanks to a lifecycle hook)
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.11"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command: [ "sh", "-c", "consul leave" ]
@@ -1,99 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.11"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command: [ "sh", "-c", "consul leave" ]
@@ -1,340 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-metrics
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-key-holder
  - kubernetes-dashboard-certs
  - kubernetes-dashboard-csrf
  resources:
  - secrets
  verbs:
  - get
  - update
  - delete
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-settings
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - dashboard-metrics-scraper
  resources:
  - services
  verbs:
  - proxy
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - 'http:heapster:'
  - 'https:heapster:'
  - dashboard-metrics-scraper
  - http:dashboard-metrics-scraper
  resources:
  - services/proxy
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - name: http
    port: 443
    targetPort: http
  selector:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/name: kubernetes-dashboard
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: kubernetes-dashboard
      app.kubernetes.io/instance: kubernetes-dashboard
      app.kubernetes.io/name: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations: null
      labels:
        app.kubernetes.io/component: kubernetes-dashboard
        app.kubernetes.io/instance: kubernetes-dashboard
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kubernetes-dashboard
        app.kubernetes.io/version: 2.7.0
        helm.sh/chart: kubernetes-dashboard-6.0.0
    spec:
      containers:
      - args:
        - --namespace=kubernetes-dashboard
        - --sidecar-host=http://127.0.0.1:8000
        - --enable-skip-login
        - --enable-insecure-login
        image: kubernetesui/dashboard:v2.7.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 9090
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 9090
          name: http
          protocol: TCP
        resources:
          limits:
            cpu: 2
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /certs
          name: kubernetes-dashboard-certs
        - mountPath: /tmp
          name: tmp-volume
      - image: kubernetesui/metrics-scraper:v1.0.8
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: dashboard-metrics-scraper
        ports:
        - containerPort: 8000
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /tmp
          name: tmp-volume
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: kubernetes-dashboard
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - emptyDir: {}
        name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard:insecure
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
@@ -1,325 +0,0 @@
|
||||
# This file was generated with the script ./update-dashboard-yaml.sh.
|
||||
#
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard
|
||||
spec: {}
|
||||
status: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations: null
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/version: 2.7.0
|
||||
helm.sh/chart: kubernetes-dashboard-6.0.0
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
annotations: null
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/version: 2.7.0
|
||||
helm.sh/chart: kubernetes-dashboard-6.0.0
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/version: 2.7.0
|
||||
helm.sh/chart: kubernetes-dashboard-6.0.0
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/version: 2.7.0
|
||||
helm.sh/chart: kubernetes-dashboard-6.0.0
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
---
|
||||
apiVersion: v1
|
||||
data: null
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
annotations: null
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/version: 2.7.0
|
||||
helm.sh/chart: kubernetes-dashboard-6.0.0
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations: null
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubernetes-dashboard
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: kubernetes-dashboard
|
||||
app.kubernetes.io/version: 2.7.0
|
||||
helm.sh/chart: kubernetes-dashboard-6.0.0
|
||||
name: kubernetes-dashboard-metrics
|
||||
rules:
|
||||
- apiGroups:
|
||||
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-key-holder
  - kubernetes-dashboard-certs
  - kubernetes-dashboard-csrf
  resources:
  - secrets
  verbs:
  - get
  - update
  - delete
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-settings
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - dashboard-metrics-scraper
  resources:
  - services
  verbs:
  - proxy
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - 'http:heapster:'
  - 'https:heapster:'
  - dashboard-metrics-scraper
  - http:dashboard-metrics-scraper
  resources:
  - services/proxy
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - name: https
    port: 443
    targetPort: https
  selector:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/name: kubernetes-dashboard
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: kubernetes-dashboard
      app.kubernetes.io/instance: kubernetes-dashboard
      app.kubernetes.io/name: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations: null
      labels:
        app.kubernetes.io/component: kubernetes-dashboard
        app.kubernetes.io/instance: kubernetes-dashboard
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kubernetes-dashboard
        app.kubernetes.io/version: 2.7.0
        helm.sh/chart: kubernetes-dashboard-6.0.0
    spec:
      containers:
      - args:
        - --namespace=kubernetes-dashboard
        - --auto-generate-certificates
        - --sidecar-host=http://127.0.0.1:8000
        image: kubernetesui/dashboard:v2.7.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8443
            scheme: HTTPS
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 8443
          name: https
          protocol: TCP
        resources:
          limits:
            cpu: 2
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /certs
          name: kubernetes-dashboard-certs
        - mountPath: /tmp
          name: tmp-volume
      - image: kubernetesui/metrics-scraper:v1.0.8
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: dashboard-metrics-scraper
        ports:
        - containerPort: 8000
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /tmp
          name: tmp-volume
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: kubernetes-dashboard
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - emptyDir: {}
        name: tmp-volume
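# Access sketch (assuming these resources are applied and kubectl points at the
# right cluster): the Service above is ClusterIP, so one way to reach the
# dashboard is a port-forward:
#   kubectl --namespace=kubernetes-dashboard port-forward service/kubernetes-dashboard 8443:443
# then browse to https://localhost:8443 (the certificate is auto-generated,
# per the --auto-generate-certificates flag, so expect a browser warning).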
@@ -1,355 +0,0 @@
# This file was generated with the script ./update-dashboard-yaml.sh.
#
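# (Summary sketch, inferred from the resources below: this manifest deploys the
# Kubernetes dashboard v2.7.0 behind a NodePort Service, plus a "cluster-admin"
# ServiceAccount, ClusterRoleBinding, and token Secret for easy login in labs.)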
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-metrics
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-key-holder
  - kubernetes-dashboard-certs
  - kubernetes-dashboard-csrf
  resources:
  - secrets
  verbs:
  - get
  - update
  - delete
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-settings
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - dashboard-metrics-scraper
  resources:
  - services
  verbs:
  - proxy
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - 'http:heapster:'
  - 'https:heapster:'
  - dashboard-metrics-scraper
  - http:dashboard-metrics-scraper
  resources:
  - services/proxy
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - name: https
    port: 443
    targetPort: https
  selector:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/name: kubernetes-dashboard
  type: NodePort
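# Unlike the ClusterIP Service in the previous manifest, this one is NodePort:
# Kubernetes allocates a port (30000-32767 by default) on every node. A way to
# look it up, assuming kubectl points at the cluster:
#   kubectl --namespace=kubernetes-dashboard get service kubernetes-dashboard
# then browse to https://<any-node-address>:<allocated-node-port>.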
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.7.0
    helm.sh/chart: kubernetes-dashboard-6.0.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: kubernetes-dashboard
      app.kubernetes.io/instance: kubernetes-dashboard
      app.kubernetes.io/name: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations: null
      labels:
        app.kubernetes.io/component: kubernetes-dashboard
        app.kubernetes.io/instance: kubernetes-dashboard
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kubernetes-dashboard
        app.kubernetes.io/version: 2.7.0
        helm.sh/chart: kubernetes-dashboard-6.0.0
    spec:
      containers:
      - args:
        - --namespace=kubernetes-dashboard
        - --auto-generate-certificates
        - --sidecar-host=http://127.0.0.1:8000
        image: kubernetesui/dashboard:v2.7.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8443
            scheme: HTTPS
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 8443
          name: https
          protocol: TCP
        resources:
          limits:
            cpu: 2
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /certs
          name: kubernetes-dashboard-certs
        - mountPath: /tmp
          name: tmp-volume
      - image: kubernetesui/metrics-scraper:v1.0.8
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: dashboard-metrics-scraper
        ports:
        - containerPort: 8000
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /tmp
          name: tmp-volume
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: kubernetes-dashboard
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - emptyDir: {}
        name: tmp-volume
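# Note: this pod runs two containers. The --sidecar-host flag above points the
# dashboard at the metrics-scraper sidecar on localhost:8000; the scraper, in
# turn, relies on metrics.k8s.io (see the kubernetes-dashboard-metrics
# ClusterRole earlier) to populate CPU and memory graphs.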
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard:cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: cluster-admin
  namespace: kubernetes-dashboard
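# Caution: the binding above grants the full cluster-admin ClusterRole to the
# "cluster-admin" ServiceAccount defined below (whose token is presumably used
# to log in to the dashboard). Convenient in a training lab; far too permissive
# for anything production-facing.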
---
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: null
  name: cluster-admin
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: cluster-admin-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: cluster-admin
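# With a kubernetes.io/service-account-token Secret like the one above, the
# control plane fills in the token automatically. A possible way to retrieve it
# for the dashboard login screen:
#   kubectl --namespace=kubernetes-dashboard get secret cluster-admin-token \
#     -o jsonpath={.data.token} | base64 -d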
@@ -1,28 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: build-image
spec:
  restartPolicy: OnFailure
  containers:
  - name: docker-build
    image: docker
    env:
    - name: REGISTRY_PORT
      value: "30000" # must match the NodePort of the cluster-local registry
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      mkdir /workspace &&
      git clone https://github.com/jpetazzo/container.training /workspace &&
      docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
      docker push localhost:$REGISTRY_PORT/worker
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-socket
    hostPath:
      path: /var/run/docker.sock
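# Usage sketch (the file name is hypothetical):
#   kubectl apply -f build-image.yaml
#   kubectl logs build-image --follow
# The pod builds and pushes the image with the *node's* Docker daemon, reached
# through the hostPath-mounted /var/run/docker.sock; this only works on
# clusters whose nodes actually run Docker, and REGISTRY_PORT must match the
# port where the local registry is exposed.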