From 9b54ea95acd1ff1bd44fa4e4061e7b8e12964cf7 Mon Sep 17 00:00:00 2001 From: Jerome Petazzoni Date: Fri, 28 Sep 2018 13:18:54 -0500 Subject: [PATCH 1/2] Massive refactoring of workshopctl This allows to manage groups of VMs across multiple infrastructure providers. It also adds support to create groups of VMs on OpenStack. WARNING: the syntax of workshopctl has changed slightly. Check READMEs for details. --- .gitignore | 5 +- prepare-vms/README.md | 126 ++--- prepare-vms/infra/example.aws | 6 + prepare-vms/infra/example.generic | 2 + prepare-vms/infra/example.openstack | 9 + prepare-vms/lib/aws.sh | 105 ---- prepare-vms/lib/cli.sh | 39 +- prepare-vms/lib/commands.sh | 465 +++++++----------- prepare-vms/lib/infra.sh | 26 + prepare-vms/lib/infra/aws.sh | 208 ++++++++ prepare-vms/lib/infra/generic.sh | 7 + prepare-vms/lib/infra/openstack.sh | 20 + prepare-vms/lib/ips-txt-to-html.py | 8 +- prepare-vms/lib/pssh.sh | 11 +- prepare-vms/settings/enix.yaml | 2 +- prepare-vms/settings/kube101.yaml | 2 +- prepare-vms/{ => templates}/cards.html | 0 prepare-vms/{ => templates}/clusters.csv | 0 prepare-vms/{settings => templates}/enix.html | 0 .../{settings => templates}/kube101.html | 0 prepare-vms/terraform/keypair.tf | 5 + prepare-vms/terraform/machines.tf | 32 ++ prepare-vms/terraform/network.tf | 23 + prepare-vms/terraform/provider.tf | 13 + prepare-vms/terraform/secgroup.tf | 12 + prepare-vms/terraform/vars.tf | 8 + prepare-vms/workshopctl | 69 +-- 27 files changed, 685 insertions(+), 518 deletions(-) create mode 100644 prepare-vms/infra/example.aws create mode 100644 prepare-vms/infra/example.generic create mode 100644 prepare-vms/infra/example.openstack delete mode 100644 prepare-vms/lib/aws.sh create mode 100644 prepare-vms/lib/infra.sh create mode 100644 prepare-vms/lib/infra/aws.sh create mode 100644 prepare-vms/lib/infra/generic.sh create mode 100644 prepare-vms/lib/infra/openstack.sh rename prepare-vms/{ => templates}/cards.html (100%) rename prepare-vms/{ => 
templates}/clusters.csv (100%) rename prepare-vms/{settings => templates}/enix.html (100%) rename prepare-vms/{settings => templates}/kube101.html (100%) create mode 100644 prepare-vms/terraform/keypair.tf create mode 100644 prepare-vms/terraform/machines.tf create mode 100644 prepare-vms/terraform/network.tf create mode 100644 prepare-vms/terraform/provider.tf create mode 100644 prepare-vms/terraform/secgroup.tf create mode 100644 prepare-vms/terraform/vars.tf diff --git a/.gitignore b/.gitignore index 3610b425..cc8fd624 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,8 @@ *.pyc *.swp *~ -prepare-vms/ips.txt -prepare-vms/ips.html -prepare-vms/ips.pdf -prepare-vms/settings.yaml prepare-vms/tags +prepare-vms/infra slides/*.yml.html slides/autopilot/state.yaml slides/index.html diff --git a/prepare-vms/README.md b/prepare-vms/README.md index 7ce1483f..83d3c3b6 100644 --- a/prepare-vms/README.md +++ b/prepare-vms/README.md @@ -1,4 +1,10 @@ -# Trainer tools to create and prepare VMs for Docker workshops on AWS or Azure +# Trainer tools to create and prepare VMs for Docker workshops + +These tools can help you to create VMs on: + +- Azure +- EC2 +- OpenStack ## Prerequisites @@ -6,6 +12,9 @@ - [Docker Compose](https://docs.docker.com/compose/install/) - [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this +Depending on the infrastructure that you want to use, you also need to install +the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment). 
+ And if you want to generate printable cards: - [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`) @@ -14,20 +23,25 @@ And if you want to generate printable cards: ## General Workflow - fork/clone repo -- set required environment variables +- create an infrastructure configuration in the `infra` directory + (using one of the example files in that directory) - create your own setting file from `settings/example.yaml` - if necessary, increase allowed open files: `ulimit -Sn 10000` -- run `./workshopctl` commands to create instances, install docker, setup each users environment in node1, other management tasks -- run `./workshopctl cards` command to generate PDF for printing handouts of each users host IP's and login info +- run `./workshopctl start` to create instances +- run `./workshopctl deploy` to install Docker and setup environment +- run `./workshopctl kube` (if you want to install and setup Kubernetes) +- run `./workshopctl cards` (if you want to generate PDF for printing handouts of each users host IP's and login info) +- run `./workshopctl stop` at the end of the workshop to terminate instances ## Clone/Fork the Repo, and Build the Tools Image The Docker Compose file here is used to build a image with all the dependencies to run the `./workshopctl` commands and optional tools. Each run of the script will check if you have those dependencies locally on your host, and will only use the container if you're [missing a dependency](workshopctl#L5). 
- $ git clone https://github.com/jpetazzo/orchestration-workshop.git - $ cd orchestration-workshop/prepare-vms + $ git clone https://github.com/jpetazzo/container.training + $ cd container.training/prepare-vms $ docker-compose build + ## Preparing to Run `./workshopctl` ### Required AWS Permissions/Info @@ -36,27 +50,37 @@ The Docker Compose file here is used to build a image with all the dependencies - Using a non-default VPC or Security Group isn't supported out of box yet, so you will have to customize `lib/commands.sh` if you want to change that. - These instances will assign the default VPC Security Group, which does not open any ports from Internet by default. So you'll need to add Inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, or run `./workshopctl opensg` which opens up all ports. -### Required Environment Variables +### Create your `infra` file -- `AWS_ACCESS_KEY_ID` -- `AWS_SECRET_ACCESS_KEY` -- `AWS_DEFAULT_REGION` +You need to do this only once. (On AWS, you can create one `infra` +file per region.) -If you're not using AWS, set these to placeholder values: +Make a copy of one of the example files in the `infra` directory. +For instance: + +```bash +cp infra/example.aws infra/aws-us-west-2 ``` -export AWS_ACCESS_KEY_ID="foo" -export AWS_SECRET_ACCESS_KEY="foo" -export AWS_DEFAULT_REGION="foo" -``` + +Edit your infrastructure file to customize it. +You will probably need to put your cloud provider credentials, +select region... If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS you can ignore this. -### Update/copy `settings/example.yaml` +### Create your `settings` file -Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `./workshopctl deploy`, `./workshopctl cards`, etc. +Similarly, pick one of the files in `settings` and copy it +to customize it. 
-./workshopctl cards 2016-09-28-00-33-bret settings/orchestration.yaml +For instance: + +```bash +cp settings/example.yaml settings/myworkshop.yaml +``` + +You're all set! ## `./workshopctl` Usage @@ -66,7 +90,7 @@ Commands: ami Show the AMI that will be used for deployment amis List Ubuntu AMIs in the current region build Build the Docker image to run this program in a container -cards Generate ready-to-print cards for a batch of VMs +cards Generate ready-to-print cards for a group of VMs deploy Install Docker on a bunch of running VMs ec2quotas Check our EC2 quotas (max instances) help Show available commands @@ -74,14 +98,14 @@ ids List the instance IDs belonging to a given tag or token ips List the IP addresses of the VMs for a given tag or token kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy) kubetest Check that all notes are reporting as Ready -list List available batches in the current region +list List available groups in the current region opensg Open the default security group to ALL ingress traffic pull_images Pre-pull a bunch of Docker images -retag Apply a new tag to a batch of VMs -start Start a batch of VMs -status List instance status for a given batch +retag Apply a new tag to a group of VMs +start Start a group of VMs +status List instance status for a given group stop Stop (terminate, shutdown, kill, remove, destroy...) instances -test Run tests (pre-flight checks) on a batch of VMs +test Run tests (pre-flight checks) on a group of VMs wrap Run this program in a container ``` @@ -95,16 +119,16 @@ wrap Run this program in a container - During `start` it will add your default local SSH key to all instances under the `ubuntu` user. - During `deploy` it will create the `docker` user with password `training`, which is printing on the cards for students. This can be configured with the `docker_user_password` property in the settings file. 
-### Example Steps to Launch a Batch of AWS Instances for a Workshop +### Example Steps to Launch a group of AWS Instances for a Workshop -- Run `./workshopctl start N` Creates `N` EC2 instances +- Run `./workshopctl start --infra infra/aws-us-east-2 --settings settings/myworkshop.yaml --count 60` to create 60 EC2 instances - Your local SSH key will be synced to instances under `ubuntu` user - AWS instances will be created and tagged based on date, and IP's stored in `prepare-vms/tags/` -- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `lib/postprep.py` via parallel-ssh +- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh - If it errors or times out, you should be able to rerun - Requires good connection to run all the parallel SSH connections, up to 100 parallel (ProTip: create dedicated management instance in same AWS region where you run all these utils from) - Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances -- Run `./workshopctl cards TAG settings/somefile.yaml` generates PDF/HTML files to print and cut and hand out to students +- Run `./workshopctl cards TAG` generates PDF/HTML files to print and cut and hand out to students - *Have a great workshop* - Run `./workshopctl stop TAG` to terminate instances. @@ -155,28 +179,12 @@ az group delete --resource-group workshop ### Example Steps to Configure Instances from a non-AWS Source -- Launch instances via your preferred method. You'll need to get the instance IPs and be able to ssh into them. -- Set placeholder values for [AWS environment variable settings](#required-environment-variables). -- Choose a tag. It could be an event name, datestamp, etc. Ensure you have created a directory for your tag: `prepare-vms/tags//` -- If you have not already generated a file with the IPs to be configured: - - The file should be named `prepare-vms/tags//ips.txt` - - Format is one IP per line, no other info needed. 
-- Ensure the settings file is as desired (especially the number of nodes): `prepare-vms/settings/kube101.yaml` -- For a tag called `myworkshop`, configure instances: `workshopctl deploy myworkshop settings/kube101.yaml` -- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube myworkshop` -- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest myworkshop` -- Generate cards to print and hand out: `workshopctl cards myworkshop settings/kube101.yaml` -- Print the cards file: `prepare-vms/tags/myworkshop/ips.html` - - -## Other Tools - -### Deploying your SSH key to all the machines - -- Make sure that you have SSH keys loaded (`ssh-add -l`). -- Source `rc`. -- Run `pcopykey`. - +- Copy `infra/example.generic` to `infra/generic` +- Run `./workshopctl start --infra infra/generic --settings settings/...yaml` +- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them. +- Create the file `prepare-vms/tags/TAG/ips.txt`, it should list the IP addresses of the VMs (one per line, without any comments or other info) +- Continue deployment with `./workshopctl deploy TAG` + (all subsequent commands are the same as with VMs created with another method) ## Even More Details @@ -188,7 +196,7 @@ To see which local key will be uploaded, run `ssh-add -l | grep RSA`. #### Instance + tag creation -10 VMs will be started, with an automatically generated tag (timestamp + your username). +The VMs will be started, with an automatically generated tag (timestamp + your username). Your SSH key will be added to the `authorized_keys` of the ubuntu user. @@ -196,15 +204,11 @@ Your SSH key will be added to the `authorized_keys` of the ubuntu user. Following the creation of the VMs, a text file will be created containing a list of their IPs. 
-This ips.txt file will be created in the $TAG/ directory and a symlink will be placed in the working directory of the script. - -If you create new VMs, the symlinked file will be overwritten. - #### Deployment Instances can be deployed manually using the `deploy` command: - $ ./workshopctl deploy TAG settings/somefile.yaml + $ ./workshopctl deploy TAG The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed. @@ -214,7 +218,7 @@ The `postprep.py` file will be copied via parallel-ssh to all of the VMs and exe #### Generate cards - $ ./workshopctl cards TAG settings/somefile.yaml + $ ./workshopctl cards TAG If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated. @@ -222,13 +226,11 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m #### List tags - $ ./workshopctl list + $ ./workshopctl list infra/some-infra-file -#### List VMs + $ ./workshopctl listall - $ ./workshopctl list TAG - -This will print a human-friendly list containing some information about each instance. + $ ./workshopctl tags #### Stop and destroy VMs diff --git a/prepare-vms/infra/example.aws b/prepare-vms/infra/example.aws new file mode 100644 index 00000000..05183d2c --- /dev/null +++ b/prepare-vms/infra/example.aws @@ -0,0 +1,6 @@ +INFRACLASS=aws +# If you are using AWS to deploy, copy this file (e.g. to "aws", or "us-east-1") +# and customize the variables below. +export AWS_DEFAULT_REGION=us-east-1 +export AWS_ACCESS_KEY_ID=AKI... +export AWS_SECRET_ACCESS_KEY=... diff --git a/prepare-vms/infra/example.generic b/prepare-vms/infra/example.generic new file mode 100644 index 00000000..5846a899 --- /dev/null +++ b/prepare-vms/infra/example.generic @@ -0,0 +1,2 @@ +INFRACLASS=generic +# This is for manual provisioning. No other variable or configuration is needed. 
diff --git a/prepare-vms/infra/example.openstack b/prepare-vms/infra/example.openstack new file mode 100644 index 00000000..a99c43af --- /dev/null +++ b/prepare-vms/infra/example.openstack @@ -0,0 +1,9 @@ +INFRACLASS=openstack +# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix") +# and customize the variables below. +export TF_VAR_user="jpetazzo" +export TF_VAR_tenant="training" +export TF_VAR_domain="Default" +export TF_VAR_password="..." +export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3" +export TF_VAR_flavor="GP1.S" \ No newline at end of file diff --git a/prepare-vms/lib/aws.sh b/prepare-vms/lib/aws.sh deleted file mode 100644 index e99abad7..00000000 --- a/prepare-vms/lib/aws.sh +++ /dev/null @@ -1,105 +0,0 @@ -aws_display_tags() { - # Print all "Name" tags in our region with their instance count - echo "[#] [Status] [Token] [Tag]" \ - | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}' - aws ec2 describe-instances \ - --query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \ - | tr -d "\r" \ - | uniq -c \ - | sort -k 3 \ - | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}' -} - -aws_get_tokens() { - aws ec2 describe-instances --output text \ - --query 'Reservations[*].Instances[*].[ClientToken]' \ - | sort -u -} - -aws_display_instance_statuses_by_tag() { - TAG=$1 - need_tag $TAG - - IDS=$(aws ec2 describe-instances \ - --filters "Name=tag:Name,Values=$TAG" \ - --query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ') - - aws ec2 describe-instance-status \ - --instance-ids $IDS \ - --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \ - --output table -} - -aws_display_instances_by_tag() { - TAG=$1 - need_tag $TAG - result=$(aws ec2 describe-instances --output table \ - --filter "Name=tag:Name,Values=$TAG" \ - --query "Reservations[*].Instances[*].[ \ - 
InstanceId, \ - State.Name, \ - Tags[0].Value, \ - PublicIpAddress, \ - InstanceType \ - ]" - ) - if [[ -z $result ]]; then - die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION." - else - echo "$result" - fi -} - -aws_get_instance_ids_by_filter() { - FILTER=$1 - aws ec2 describe-instances --filters $FILTER \ - --query Reservations[*].Instances[*].InstanceId \ - --output text | tr "\t" "\n" | tr -d "\r" -} - -aws_get_instance_ids_by_client_token() { - TOKEN=$1 - need_tag $TOKEN - aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN -} - -aws_get_instance_ids_by_tag() { - TAG=$1 - need_tag $TAG - aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG -} - -aws_get_instance_ips_by_tag() { - TAG=$1 - need_tag $TAG - aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \ - --output text \ - --query "Reservations[*].Instances[*].PublicIpAddress" \ - | tr "\t" "\n" \ - | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs -} - -aws_kill_instances_by_tag() { - TAG=$1 - need_tag $TAG - IDS=$(aws_get_instance_ids_by_tag $TAG) - if [ -z "$IDS" ]; then - die "Invalid tag." - fi - - info "Deleting instances with tag $TAG." - - aws ec2 terminate-instances --instance-ids $IDS \ - | grep ^TERMINATINGINSTANCES - - info "Deleted instances with tag $TAG." -} - -aws_tag_instances() { - OLD_TAG_OR_TOKEN=$1 - NEW_TAG=$2 - IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN) - [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null - IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN) - [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null -} diff --git a/prepare-vms/lib/cli.sh b/prepare-vms/lib/cli.sh index 682c121b..47e86e80 100644 --- a/prepare-vms/lib/cli.sh +++ b/prepare-vms/lib/cli.sh @@ -50,27 +50,38 @@ sep() { fi } -need_tag() { +need_infra() { if [ -z "$1" ]; then + die "Please specify infrastructure file. (e.g.: infra/aws)" + fi + if [ ! 
-f "$1" ]; then + die "Infrastructure file $1 doesn't exist." + fi + . "$1" + . "lib/infra/$INFRACLASS.sh" +} + +need_tag() { + if [ -z "$TAG" ]; then die "Please specify a tag or token. To see available tags and tokens, run: $0 list" fi + if [ ! -d "tags/$TAG" ]; then + die "Tag $TAG not found (directory tags/$TAG does not exist)." + fi + for FILE in settings.yaml ips.txt infra.sh; do + if [ ! -f "tags/$TAG/$FILE" ]; then + warning "File tags/$TAG/$FILE not found." + fi + done + . "tags/$TAG/infra.sh" + . "lib/infra/$INFRACLASS.sh" } need_settings() { if [ -z "$1" ]; then - die "Please specify a settings file." - elif [ ! -f "$1" ]; then + die "Please specify a settings file. (e.g.: settings/kube101.yaml)" + fi + if [ ! -f "$1" ]; then die "Settings file $1 doesn't exist." fi } - -need_ips_file() { - IPS_FILE=$1 - if [ -z "$IPS_FILE" ]; then - die "IPS_FILE not set." - fi - - if [ ! -s "$IPS_FILE" ]; then - die "IPS_FILE $IPS_FILE not found. Please run: $0 ips " - fi -} diff --git a/prepare-vms/lib/commands.sh b/prepare-vms/lib/commands.sh index f1ee86a8..23ecfcfc 100644 --- a/prepare-vms/lib/commands.sh +++ b/prepare-vms/lib/commands.sh @@ -7,21 +7,11 @@ _cmd() { _cmd help "Show available commands" _cmd_help() { - printf "$(basename $0) - the orchestration workshop swiss army knife\n" + printf "$(basename $0) - the container training swiss army knife\n" printf "Commands:" printf "%s" "$HELP" | sort } -_cmd amis "List Ubuntu AMIs in the current region" -_cmd_amis() { - find_ubuntu_ami -r $AWS_DEFAULT_REGION "$@" -} - -_cmd ami "Show the AMI that will be used for deployment" -_cmd_ami() { - find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q -} - _cmd build "Build the Docker image to run this program in a container" _cmd_build() { docker-compose build @@ -32,56 +22,38 @@ _cmd_wrap() { docker-compose run --rm workshopctl "$@" } -_cmd cards "Generate ready-to-print cards for a batch of VMs" +_cmd cards "Generate ready-to-print cards for a group 
of VMs" _cmd_cards() { TAG=$1 - SETTINGS=$2 - need_tag $TAG - need_settings $SETTINGS + need_tag - # If you're not using AWS, populate the ips.txt file manually - if [ ! -f tags/$TAG/ips.txt ]; then - aws_get_instance_ips_by_tag $TAG >tags/$TAG/ips.txt - fi - - # Remove symlinks to old cards - rm -f ips.html ips.pdf - - # This will generate two files in the base dir: ips.pdf and ips.html - lib/ips-txt-to-html.py $SETTINGS - - for f in ips.html ips.pdf; do - # Remove old versions of cards if they exist - rm -f tags/$TAG/$f - - # Move the generated file and replace it with a symlink - mv -f $f tags/$TAG/$f && ln -s tags/$TAG/$f $f - done + # This will process ips.txt to generate two files: ips.pdf and ips.html + ( + cd tags/$TAG + ../../lib/ips-txt-to-html.py settings.yaml + ) info "Cards created. You can view them with:" - info "xdg-open ips.html ips.pdf (on Linux)" - info "open ips.html ips.pdf (on MacOS)" + info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)" + info "open tags/$TAG/ips.html tags/$TAG/ips.pdf (on MacOS)" } _cmd deploy "Install Docker on a bunch of running VMs" _cmd_deploy() { TAG=$1 - SETTINGS=$2 - need_tag $TAG - need_settings $SETTINGS - link_tag $TAG - count=$(wc -l ips.txt) + need_tag # wait until all hosts are reachable before trying to deploy info "Trying to reach $TAG instances..." - while ! tag_is_reachable $TAG; do + while ! tag_is_reachable; do >/dev/stderr echo -n "." 
sleep 2 done >/dev/stderr echo "" + echo deploying > tags/$TAG/status sep "Deploying tag $TAG" - pssh -I tee /tmp/settings.yaml <$SETTINGS + pssh -I tee /tmp/settings.yaml >/tmp/pp.out 2>>/tmp/pp.err" >/tmp/pp.out 2>>/tmp/pp.err" tags/$TAG/status info "You may want to run one of the following commands:" info "$0 kube $TAG" info "$0 pull_images $TAG" - info "$0 cards $TAG $SETTINGS" + info "$0 cards $TAG" } _cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)" _cmd_kube() { + TAG=$1 + need_tag # Install packages pssh --timeout 200 " @@ -183,12 +158,14 @@ _cmd_kube() { helm completion bash | sudo tee /etc/bash_completion.d/helm fi" - sep "Done" } -_cmd kubetest "Check that all notes are reporting as Ready" +_cmd kubetest "Check that all nodes are reporting as Ready" _cmd_kubetest() { + TAG=$1 + need_tag + # There are way too many backslashes in the command below. # Feel free to make that better ♥ pssh " @@ -202,7 +179,7 @@ _cmd_kubetest() { fi" } -_cmd ids "List the instance IDs belonging to a given tag or token" +_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token" _cmd_ids() { TAG=$1 need_tag $TAG @@ -215,176 +192,31 @@ _cmd_ids() { aws_get_instance_ids_by_client_token $TAG } -_cmd ips "List the IP addresses of the VMs for a given tag or token" -_cmd_ips() { - TAG=$1 - need_tag $TAG - mkdir -p tags/$TAG - aws_get_instance_ips_by_tag $TAG | tee tags/$TAG/ips.txt - link_tag $TAG -} - -_cmd list "List available batches in the current region" +_cmd list "List available groups for a given infrastructure" _cmd_list() { - info "Listing batches in region $AWS_DEFAULT_REGION:" - aws_display_tags + need_infra $1 + infra_list } -_cmd status "List instance status for a given batch" -_cmd_status() { - info "Using region $AWS_DEFAULT_REGION." 
- TAG=$1 - need_tag $TAG - describe_tag $TAG - tag_is_reachable $TAG - info "You may be interested in running one of the following commands:" - info "$0 ips $TAG" - info "$0 deploy $TAG " -} - -_cmd opensg "Open the default security group to ALL ingress traffic" -_cmd_opensg() { - aws ec2 authorize-security-group-ingress \ - --group-name default \ - --protocol icmp \ - --port -1 \ - --cidr 0.0.0.0/0 - - aws ec2 authorize-security-group-ingress \ - --group-name default \ - --protocol udp \ - --port 0-65535 \ - --cidr 0.0.0.0/0 - - aws ec2 authorize-security-group-ingress \ - --group-name default \ - --protocol tcp \ - --port 0-65535 \ - --cidr 0.0.0.0/0 -} - -_cmd pull_images "Pre-pull a bunch of Docker images" -_cmd_pull_images() { - TAG=$1 - need_tag $TAG - pull_tag $TAG -} - -_cmd retag "Apply a new tag to a batch of VMs" -_cmd_retag() { - OLDTAG=$1 - NEWTAG=$2 - need_tag $OLDTAG - if [[ -z "$NEWTAG" ]]; then - die "You must specify a new tag to apply." - fi - aws_tag_instances $OLDTAG $NEWTAG -} - -_cmd start "Start a batch of VMs" -_cmd_start() { - # Number of instances to create - COUNT=$1 - # Optional settings file (to carry on with deployment) - SETTINGS=$2 - - if [ -z "$COUNT" ]; then - die "Indicate number of instances to start." - fi - - # Print our AWS username, to ease the pain of credential-juggling - greet - - # Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys - key_name=$(sync_keys) - - AMI=$(_cmd_ami) # Retrieve the AWS image ID - if [ -z "$AMI" ]; then - die "I could not find which AMI to use in this region. Try another region?" 
- fi - TOKEN=$(get_token) # generate a timestamp token for this batch of VMs - AWS_KEY_NAME=$(make_key_name) - - sep "Starting instances" - info " Count: $COUNT" - info " Region: $AWS_DEFAULT_REGION" - info " Token/tag: $TOKEN" - info " AMI: $AMI" - info " Key name: $AWS_KEY_NAME" - result=$(aws ec2 run-instances \ - --key-name $AWS_KEY_NAME \ - --count $COUNT \ - --instance-type ${AWS_INSTANCE_TYPE-t2.medium} \ - --client-token $TOKEN \ - --image-id $AMI) - reservation_id=$(echo "$result" | head -1 | awk '{print $2}') - info "Reservation ID: $reservation_id" - sep - - # if instance creation succeeded, we should have some IDs - IDS=$(aws_get_instance_ids_by_client_token $TOKEN) - if [ -z "$IDS" ]; then - die "Instance creation failed." - fi - - # Tag these new instances with a tag that is the same as the token - TAG=$TOKEN - aws_tag_instances $TOKEN $TAG - - wait_until_tag_is_running $TAG $COUNT - - sep - info "Successfully created $COUNT instances with tag $TAG" - sep - - mkdir -p tags/$TAG - IPS=$(aws_get_instance_ips_by_tag $TAG) - echo "$IPS" >tags/$TAG/ips.txt - link_tag $TAG - if [ -n "$SETTINGS" ]; then - _cmd_deploy $TAG $SETTINGS - else - info "To deploy or kill these instances, run one of the following:" - info "$0 deploy $TAG " - info "$0 stop $TAG" - fi -} - -_cmd ec2quotas "Check our EC2 quotas (max instances)" -_cmd_ec2quotas() { - greet - - max_instances=$(aws ec2 describe-account-attributes \ - --attribute-names max-instances \ - --query 'AccountAttributes[*][AttributeValues]') - info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances." 
- - # Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list - # If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive: - # Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/" - info "Available regions:" - aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50 -} - -_cmd stop "Stop (terminate, shutdown, kill, remove, destroy...) instances" -_cmd_stop() { - TAG=$1 - need_tag $TAG - aws_kill_instances_by_tag $TAG -} - -_cmd test "Run tests (pre-flight checks) on a batch of VMs" -_cmd_test() { - TAG=$1 - need_tag $TAG - test_tag $TAG +_cmd listall "List VMs running on all configured infrastructures" +_cmd_listall() { + for infra in infra/*; do + case $infra in + infra/example.*) + ;; + *) + info "Listing infrastructure $infra:" + need_infra $infra + infra_list + ;; + esac + done } _cmd netfix "Disable GRO and run a pinger job on the VMs" _cmd_netfix () { TAG=$1 - need_tag $TAG - link_tag $TAG + need_tag pssh " sudo ethtool -K ens3 gro off @@ -406,97 +238,171 @@ EOF sudo systemctl start pinger" } -### +_cmd opensg "Open the default security group to ALL ingress traffic" +_cmd_opensg() { + need_infra $1 + infra_opensg +} + +_cmd pull_images "Pre-pull a bunch of Docker images" +_cmd_pull_images() { + TAG=$1 + need_tag + pull_tag +} + +_cmd quotas "Check our infrastructure quotas (max instances)" +_cmd_quotas() { + need_infra $1 + infra_quotas +} + +_cmd retag "(FIXME) Apply a new tag to a group of VMs" +_cmd_retag() { + OLDTAG=$1 + NEWTAG=$2 + TAG=$OLDTAG + need_tag + if [[ -z "$NEWTAG" ]]; then + die "You must specify a new tag to apply." + fi + aws_tag_instances $OLDTAG $NEWTAG +} + +_cmd start "Start a group of VMs" +_cmd_start() { + while [ ! -z "$*" ]; do + case "$1" in + --infra) INFRA=$2; shift 2;; + --settings) SETTINGS=$2; shift 2;; + --count) COUNT=$2; shift 2;; + --tag) TAG=$2; shift 2;; + *) die "Unrecognized parameter: $1." 
+ esac + done + + if [ -z "$INFRA" ]; then + die "Please add --infra flag to specify which infrastructure file to use." + fi + if [ -z "$SETTINGS" ]; then + die "Please add --settings flag to specify which settings file to use." + fi + if [ -z "$COUNT" ]; then + COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS) + warning "No --count option was specified. Using value from settings file ($COUNT)." + fi + + # Check that the specified settings and infrastructure are valid. + need_settings $SETTINGS + need_infra $INFRA + + if [ -z "$TAG" ]; then + TAG=$(make_tag) + fi + mkdir -p tags/$TAG + ln -s ../../$INFRA tags/$TAG/infra.sh + ln -s ../../$SETTINGS tags/$TAG/settings.yaml + echo creating > tags/$TAG/status + + infra_start $COUNT + sep + info "Successfully created $COUNT instances with tag $TAG" + sep + echo created > tags/$TAG/status + + info "To deploy Docker on these instances, you can run:" + info "$0 deploy $TAG" + info "To terminate these instances, you can run:" + info "$0 stop $TAG" +} + +_cmd stop "Stop (terminate, shutdown, kill, remove, destroy...) instances" +_cmd_stop() { + TAG=$1 + need_tag + infra_stop + echo stopped > tags/$TAG/status +} + +_cmd tags "List groups of VMs known locally" +_cmd_tags() { + ( + cd tags + echo "[#] [Status] [Tag] [Infra]" \ + | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}' + for tag in *; do + if [ -f $tag/ips.txt ]; then + count="$(wc -l < $tag/ips.txt)" + else + count="?" + fi + if [ -f $tag/status ]; then + status="$(cat $tag/status)" + else + status="?" + fi + if [ -f $tag/infra.sh ]; then + infra="$(basename $(readlink $tag/infra.sh))" + else + infra="?" + fi + echo "$count $status $tag $infra" \ + | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}' + done + ) +} + +_cmd test "Run tests (pre-flight checks) on a group of VMs" +_cmd_test() { + TAG=$1 + need_tag + test_tag +} greet() { IAMUSER=$(aws iam get-user --query 'User.UserName') info "Hello! 
You seem to be UNIX user $USER, and IAM user $IAMUSER." } -link_tag() { - TAG=$1 - need_tag $TAG - IPS_FILE=tags/$TAG/ips.txt - need_ips_file $IPS_FILE - ln -sf $IPS_FILE ips.txt -} - pull_tag() { - TAG=$1 - need_tag $TAG - link_tag $TAG - if [ ! -s $IPS_FILE ]; then - die "Nonexistent or empty IPs file $IPS_FILE." - fi - # Pre-pull a bunch of images pssh --timeout 900 'for I in \ - debian:latest \ - ubuntu:latest \ - fedora:latest \ - centos:latest \ - elasticsearch:2 \ - postgres \ - redis \ - alpine \ - registry \ - nicolaka/netshoot \ - jpetazzo/trainingwheels \ - golang \ - training/namer \ - dockercoins/hasher \ - dockercoins/rng \ - dockercoins/webui \ - dockercoins/worker \ - logstash \ - prom/node-exporter \ - google/cadvisor \ - dockersamples/visualizer \ - nathanleclaire/redisonrails; do + debian:latest \ + ubuntu:latest \ + fedora:latest \ + centos:latest \ + elasticsearch:2 \ + postgres \ + redis \ + alpine \ + registry \ + nicolaka/netshoot \ + jpetazzo/trainingwheels \ + golang \ + training/namer \ + dockercoins/hasher \ + dockercoins/rng \ + dockercoins/webui \ + dockercoins/worker \ + logstash \ + prom/node-exporter \ + google/cadvisor \ + dockersamples/visualizer \ + nathanleclaire/redisonrails; do sudo -u docker docker pull $I done' info "Finished pulling images for $TAG." 
- info "You may now want to run:" - info "$0 cards $TAG " -} - -wait_until_tag_is_running() { - max_retry=50 - TAG=$1 - COUNT=$2 - i=0 - done_count=0 - while [[ $done_count -lt $COUNT ]]; do - let "i += 1" - info "$(printf "%d/%d instances online" $done_count $COUNT)" - done_count=$(aws ec2 describe-instances \ - --filters "Name=instance-state-name,Values=running" \ - "Name=tag:Name,Values=$TAG" \ - --query "Reservations[*].Instances[*].State.Name" \ - | tr "\t" "\n" \ - | wc -l) - - if [[ $i -gt $max_retry ]]; then - die "Timed out while waiting for instance creation (after $max_retry retries)" - fi - sleep 1 - done } tag_is_reachable() { - TAG=$1 - need_tag $TAG - link_tag $TAG pssh -t 5 true 2>&1 >/dev/null } test_tag() { - TAG=$1 ips_file=tags/$TAG/ips.txt info "Picking a random IP address in $ips_file to run tests." - n=$((1 + $RANDOM % $(wc -l <$ips_file))) - ip=$(head -n $n $ips_file | tail -n 1) + ip=$(shuf -n1 $ips_file) test_vm $ip info "Tests complete." } @@ -572,7 +478,7 @@ sync_keys() { fi } -get_token() { +make_tag() { if [ -z $USER ]; then export USER=anonymous fi @@ -580,6 +486,7 @@ get_token() { } describe_tag() { + FIXME # Display instance details and reachability/status information TAG=$1 need_tag $TAG diff --git a/prepare-vms/lib/infra.sh b/prepare-vms/lib/infra.sh new file mode 100644 index 00000000..08cefbca --- /dev/null +++ b/prepare-vms/lib/infra.sh @@ -0,0 +1,26 @@ +# Default stub functions for infrastructure libraries. +# When loading an infrastructure library, these functions will be overridden. + +infra_list() { + warning "infra_list is unsupported on $INFRACLASS." +} + +infra_quotas() { + warning "infra_quotas is unsupported on $INFRACLASS." +} + +infra_start() { + warning "infra_start is unsupported on $INFRACLASS." +} + +infra_stop() { + warning "infra_stop is unsupported on $INFRACLASS." +} + +infra_quotas() { + warning "infra_quotas is unsupported on $INFRACLASS." 
+} + +infra_opensg() { + warning "infra_opensg is unsupported on $INFRACLASS." +} diff --git a/prepare-vms/lib/infra/aws.sh b/prepare-vms/lib/infra/aws.sh new file mode 100644 index 00000000..44f8d781 --- /dev/null +++ b/prepare-vms/lib/infra/aws.sh @@ -0,0 +1,208 @@ +infra_list() { + aws_display_tags +} + +infra_quotas() { + greet + + max_instances=$(aws ec2 describe-account-attributes \ + --attribute-names max-instances \ + --query 'AccountAttributes[*][AttributeValues]') + info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances." + + # Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list + # If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive: + # Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/" + info "Available regions:" + aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50 +} + +infra_start() { + COUNT=$1 + + # Print our AWS username, to ease the pain of credential-juggling + greet + + # Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys + key_name=$(sync_keys) + + AMI=$(aws_get_ami) # Retrieve the AWS image ID + if [ -z "$AMI" ]; then + die "I could not find which AMI to use in this region. Try another region?" + fi + AWS_KEY_NAME=$(make_key_name) + + sep "Starting instances" + info " Count: $COUNT" + info " Region: $AWS_DEFAULT_REGION" + info " Token/tag: $TAG" + info " AMI: $AMI" + info " Key name: $AWS_KEY_NAME" + result=$(aws ec2 run-instances \ + --key-name $AWS_KEY_NAME \ + --count $COUNT \ + --instance-type ${AWS_INSTANCE_TYPE-t2.medium} \ + --client-token $TAG \ + --image-id $AMI) + reservation_id=$(echo "$result" | head -1 | awk '{print $2}') + info "Reservation ID: $reservation_id" + sep + + # if instance creation succeeded, we should have some IDs + IDS=$(aws_get_instance_ids_by_client_token $TAG) + if [ -z "$IDS" ]; then + die "Instance creation failed." 
+ fi + + # Tag these new instances with a tag that is the same as the token + aws_tag_instances $TAG $TAG + + # Wait until EC2 API tells us that the instances are running + wait_until_tag_is_running $TAG $COUNT + + aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt +} + +infra_stop() { + aws_kill_instances_by_tag +} + +infra_opensg() { + aws ec2 authorize-security-group-ingress \ + --group-name default \ + --protocol icmp \ + --port -1 \ + --cidr 0.0.0.0/0 + + aws ec2 authorize-security-group-ingress \ + --group-name default \ + --protocol udp \ + --port 0-65535 \ + --cidr 0.0.0.0/0 + + aws ec2 authorize-security-group-ingress \ + --group-name default \ + --protocol tcp \ + --port 0-65535 \ + --cidr 0.0.0.0/0 +} + +wait_until_tag_is_running() { + max_retry=50 + i=0 + done_count=0 + while [[ $done_count -lt $COUNT ]]; do + let "i += 1" + info "$(printf "%d instances online" $done_count)" + done_count=$(aws ec2 describe-instances \ + --filters "Name=instance-state-name,Values=running" \ + "Name=tag:Name,Values=$TAG" \ + --query "Reservations[*].Instances[*].State.Name" \ + | tr "\t" "\n" \ + | wc -l) + + if [[ $i -gt $max_retry ]]; then + die "Timed out while waiting for instance creation (after $max_retry retries)" + fi + sleep 1 + done +} + +aws_display_tags() { + # Print all "Name" tags in our region with their instance count + echo "[#] [Status] [Token] [Tag]" \ + | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}' + aws ec2 describe-instances \ + --query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \ + | tr -d "\r" \ + | uniq -c \ + | sort -k 3 \ + | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}' +} + +aws_get_tokens() { + aws ec2 describe-instances --output text \ + --query 'Reservations[*].Instances[*].[ClientToken]' \ + | sort -u +} + +aws_display_instance_statuses_by_tag() { + IDS=$(aws ec2 describe-instances \ + --filters "Name=tag:Name,Values=$TAG" \ + --query "Reservations[*].Instances[*].InstanceId" | tr 
'\t' ' ') + + aws ec2 describe-instance-status \ + --instance-ids $IDS \ + --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \ + --output table +} + +aws_display_instances_by_tag() { + result=$(aws ec2 describe-instances --output table \ + --filter "Name=tag:Name,Values=$TAG" \ + --query "Reservations[*].Instances[*].[ \ + InstanceId, \ + State.Name, \ + Tags[0].Value, \ + PublicIpAddress, \ + InstanceType \ + ]" + ) + if [[ -z $result ]]; then + die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION." + else + echo "$result" + fi +} + +aws_get_instance_ids_by_filter() { + FILTER=$1 + aws ec2 describe-instances --filters $FILTER \ + --query Reservations[*].Instances[*].InstanceId \ + --output text | tr "\t" "\n" | tr -d "\r" +} + +aws_get_instance_ids_by_client_token() { + TOKEN=$1 + need_tag $TOKEN + aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN +} + +aws_get_instance_ids_by_tag() { + aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG +} + +aws_get_instance_ips_by_tag() { + aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \ + --output text \ + --query "Reservations[*].Instances[*].PublicIpAddress" \ + | tr "\t" "\n" \ + | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs +} + +aws_kill_instances_by_tag() { + IDS=$(aws_get_instance_ids_by_tag $TAG) + if [ -z "$IDS" ]; then + die "Invalid tag." + fi + + info "Deleting instances with tag $TAG." + + aws ec2 terminate-instances --instance-ids $IDS \ + | grep ^TERMINATINGINSTANCES + + info "Deleted instances with tag $TAG." 
+} + +aws_tag_instances() { + OLD_TAG_OR_TOKEN=$1 + NEW_TAG=$2 + IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN) + [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null + IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN) + [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null +} + +aws_get_ami() { + find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q +} diff --git a/prepare-vms/lib/infra/generic.sh b/prepare-vms/lib/infra/generic.sh new file mode 100644 index 00000000..e44ab076 --- /dev/null +++ b/prepare-vms/lib/infra/generic.sh @@ -0,0 +1,7 @@ +infra_start() { + COUNT=$1 + info "You should now run your provisioning commands for $COUNT machines." + info "Once done, put the list of IP addresses in tags/$TAG/ips.txt" + info "(one IP address per line, without any comments or extra lines)." + touch tags/$TAG/ips.txt +} diff --git a/prepare-vms/lib/infra/openstack.sh b/prepare-vms/lib/infra/openstack.sh new file mode 100644 index 00000000..5beae3c2 --- /dev/null +++ b/prepare-vms/lib/infra/openstack.sh @@ -0,0 +1,20 @@ +infra_start() { + COUNT=$1 + + cp terraform/*.tf tags/$TAG + ( + cd tags/$TAG + terraform init + echo prefix = \"$TAG\" >> terraform.tfvars + echo count = \"$COUNT\" >> terraform.tfvars + terraform apply -auto-approve + terraform output ip_addresses > ips.txt + ) +} + +infra_stop() { + ( + cd tags/$TAG + terraform destroy -auto-approve + ) +} \ No newline at end of file diff --git a/prepare-vms/lib/ips-txt-to-html.py b/prepare-vms/lib/ips-txt-to-html.py index f95fe3d9..fd576afc 100755 --- a/prepare-vms/lib/ips-txt-to-html.py +++ b/prepare-vms/lib/ips-txt-to-html.py @@ -31,7 +31,13 @@ while ips: clusters.append(cluster) template_file_name = SETTINGS["cards_template"] -template = jinja2.Template(open(template_file_name).read()) +template_file_path = os.path.join( + os.path.dirname(__file__), + "..", + "templates", + 
template_file_name + ) +template = jinja2.Template(open(template_file_path).read()) with open("ips.html", "w") as f: f.write(template.render(clusters=clusters, **SETTINGS)) print("Generated ips.html") diff --git a/prepare-vms/lib/pssh.sh b/prepare-vms/lib/pssh.sh index cf13f48b..abb94539 100644 --- a/prepare-vms/lib/pssh.sh +++ b/prepare-vms/lib/pssh.sh @@ -1,12 +1,17 @@ # This file can be sourced in order to directly run commands on -# a batch of VMs whose IPs are located in ips.txt of the directory in which +# a group of VMs whose IPs are located in ips.txt of the directory in which # the command is run. pssh() { - HOSTFILE="ips.txt" + if [ -z "$TAG" ]; then + >/dev/stderr echo "Variable \$TAG is not set." + return + fi + + HOSTFILE="tags/$TAG/ips.txt" [ -f $HOSTFILE ] || { - >/dev/stderr echo "No hostfile found at $HOSTFILE" + >/dev/stderr echo "Hostfile $HOSTFILE not found." return } diff --git a/prepare-vms/settings/enix.yaml b/prepare-vms/settings/enix.yaml index 37d03579..be174893 100644 --- a/prepare-vms/settings/enix.yaml +++ b/prepare-vms/settings/enix.yaml @@ -2,7 +2,7 @@ clustersize: 5 # Jinja2 template to use to generate ready-to-cut cards -cards_template: settings/enix.html +cards_template: enix.html # Use "Letter" in the US, and "A4" everywhere else paper_size: A4 diff --git a/prepare-vms/settings/kube101.yaml b/prepare-vms/settings/kube101.yaml index fbbfcccb..8ae69278 100644 --- a/prepare-vms/settings/kube101.yaml +++ b/prepare-vms/settings/kube101.yaml @@ -4,7 +4,7 @@ clustersize: 3 # Jinja2 template to use to generate ready-to-cut cards -cards_template: settings/kube101.html +cards_template: kube101.html # Use "Letter" in the US, and "A4" everywhere else paper_size: Letter diff --git a/prepare-vms/cards.html b/prepare-vms/templates/cards.html similarity index 100% rename from prepare-vms/cards.html rename to prepare-vms/templates/cards.html diff --git a/prepare-vms/clusters.csv b/prepare-vms/templates/clusters.csv similarity index 100% rename 
from prepare-vms/clusters.csv rename to prepare-vms/templates/clusters.csv diff --git a/prepare-vms/settings/enix.html b/prepare-vms/templates/enix.html similarity index 100% rename from prepare-vms/settings/enix.html rename to prepare-vms/templates/enix.html diff --git a/prepare-vms/settings/kube101.html b/prepare-vms/templates/kube101.html similarity index 100% rename from prepare-vms/settings/kube101.html rename to prepare-vms/templates/kube101.html diff --git a/prepare-vms/terraform/keypair.tf b/prepare-vms/terraform/keypair.tf new file mode 100644 index 00000000..5b644a7c --- /dev/null +++ b/prepare-vms/terraform/keypair.tf @@ -0,0 +1,5 @@ +resource "openstack_compute_keypair_v2" "ssh_deploy_key" { + name = "${var.prefix}" + public_key = "${file("~/.ssh/id_rsa.pub")}" +} + diff --git a/prepare-vms/terraform/machines.tf b/prepare-vms/terraform/machines.tf new file mode 100644 index 00000000..44b5b8c2 --- /dev/null +++ b/prepare-vms/terraform/machines.tf @@ -0,0 +1,32 @@ +resource "openstack_compute_instance_v2" "machine" { + count = "${var.count}" + name = "${format("%s-%04d", "${var.prefix}", count.index+1)}" + image_name = "Ubuntu 16.04 (Xenial Xerus)" + flavor_name = "${var.flavor}" + security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"] + key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}" + + network { + name = "${openstack_networking_network_v2.internal.name}" + fixed_ip_v4 = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}" + } +} + +resource "openstack_compute_floatingip_v2" "machine" { + count = "${var.count}" + # This is something provided to us by Enix when our tenant was provisioned. 
+ pool = "Public Floating" +} + +resource "openstack_compute_floatingip_associate_v2" "machine" { + count = "${var.count}" + floating_ip = "${openstack_compute_floatingip_v2.machine.*.address[count.index]}" + instance_id = "${openstack_compute_instance_v2.machine.*.id[count.index]}" + fixed_ip = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}" +} + +output "ip_addresses" { + value = "${join("\n", openstack_compute_floatingip_v2.machine.*.address)}" +} + +variable "flavor" {} diff --git a/prepare-vms/terraform/network.tf b/prepare-vms/terraform/network.tf new file mode 100644 index 00000000..62d6b3cc --- /dev/null +++ b/prepare-vms/terraform/network.tf @@ -0,0 +1,23 @@ +resource "openstack_networking_network_v2" "internal" { + name = "${var.prefix}" +} + +resource "openstack_networking_subnet_v2" "internal" { + name = "${var.prefix}" + network_id = "${openstack_networking_network_v2.internal.id}" + cidr = "10.10.0.0/16" + ip_version = 4 + dns_nameservers = ["1.1.1.1"] +} + +resource "openstack_networking_router_v2" "router" { + name = "${var.prefix}" + external_network_id = "15f0c299-1f50-42a6-9aff-63ea5b75f3fc" +} + +resource "openstack_networking_router_interface_v2" "router_internal" { + router_id = "${openstack_networking_router_v2.router.id}" + subnet_id = "${openstack_networking_subnet_v2.internal.id}" +} + + diff --git a/prepare-vms/terraform/provider.tf b/prepare-vms/terraform/provider.tf new file mode 100644 index 00000000..9e71bd53 --- /dev/null +++ b/prepare-vms/terraform/provider.tf @@ -0,0 +1,13 @@ +provider "openstack" { + user_name = "${var.user}" + tenant_name = "${var.tenant}" + domain_name = "${var.domain}" + password = "${var.password}" + auth_url = "${var.auth_url}" +} + +variable "user" {} +variable "tenant" {} +variable "domain" {} +variable "password" {} +variable "auth_url" {} diff --git a/prepare-vms/terraform/secgroup.tf b/prepare-vms/terraform/secgroup.tf new file mode 100644 index 00000000..35b2e020 --- 
/dev/null +++ b/prepare-vms/terraform/secgroup.tf @@ -0,0 +1,12 @@ +resource "openstack_networking_secgroup_v2" "full_access" { + name = "${var.prefix} - full access" +} + +resource "openstack_networking_secgroup_rule_v2" "full_access" { + direction = "ingress" + ethertype = "IPv4" + protocol = "" + remote_ip_prefix = "0.0.0.0/0" + security_group_id = "${openstack_networking_secgroup_v2.full_access.id}" +} + diff --git a/prepare-vms/terraform/vars.tf b/prepare-vms/terraform/vars.tf new file mode 100644 index 00000000..61c3e7e4 --- /dev/null +++ b/prepare-vms/terraform/vars.tf @@ -0,0 +1,8 @@ +variable "prefix" { + type = "string" +} + +variable "count" { + type = "string" +} + diff --git a/prepare-vms/workshopctl b/prepare-vms/workshopctl index 07102ed1..870d3e06 100755 --- a/prepare-vms/workshopctl +++ b/prepare-vms/workshopctl @@ -1,20 +1,19 @@ #!/bin/bash -# Get the script's real directory, whether we're being called directly or via a symlink +# Get the script's real directory. +# This should work whether we're being called directly or via a symlink. if [ -L "$0" ]; then export SCRIPT_DIR=$(dirname $(readlink "$0")) else export SCRIPT_DIR=$(dirname "$0") fi -# Load all scriptlets +# Load all scriptlets. cd "$SCRIPT_DIR" for lib in lib/*.sh; do . $lib done -TRAINER_IMAGE="preparevms_prepare-vms" - DEPENDENCIES=" aws ssh @@ -25,49 +24,26 @@ DEPENDENCIES=" man " -ENVVARS=" - AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY - AWS_DEFAULT_REGION - SSH_AUTH_SOCK - " +# Check for missing dependencies, and issue a warning if necessary. +missing=0 +for dependency in $DEPENDENCIES; do + if ! command -v $dependency >/dev/null; then + warning "Dependency $dependency could not be found." + missing=1 + fi +done +if [ $missing = 1 ]; then + warning "At least one dependency is missing. Install it or try the image wrapper." +fi -check_envvars() { - status=0 - for envvar in $ENVVARS; do - if [ -z "${!envvar}" ]; then - error "Environment variable $envvar is not set." 
- if [ "$envvar" = "SSH_AUTH_SOCK" ]; then - error "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?" - fi - status=1 - fi - done - return $status -} +# Check if SSH_AUTH_SOCK is set. +# (If it's not, deployment will almost certainly fail.) +if [ -z "${SSH_AUTH_SOCK}" ]; then + warning "Environment variable SSH_AUTH_SOCK is not set." + warning "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?" +fi -check_dependencies() { - status=0 - for dependency in $DEPENDENCIES; do - if ! command -v $dependency >/dev/null; then - warning "Dependency $dependency could not be found." - status=1 - fi - done - return $status -} - -check_image() { - docker inspect $TRAINER_IMAGE >/dev/null 2>&1 -} - -check_envvars \ - || die "Please set all required environment variables." - -check_dependencies \ - || warning "At least one dependency is missing. Install it or try the image wrapper." - -# Now check which command was invoked and execute it +# Now check which command was invoked and execute it. if [ "$1" ]; then cmd="$1" shift @@ -77,6 +53,3 @@ fi fun=_cmd_$cmd type -t $fun | grep -q function || die "Invalid command: $cmd" $fun "$@" - -# export SSH_AUTH_DIRNAME=$(dirname $SSH_AUTH_SOCK) -# docker-compose run prepare-vms "$@" From e4e0386efd5b1aff840aa4c093508d0c2cc166fa Mon Sep 17 00:00:00 2001 From: Jerome Petazzoni Date: Fri, 28 Sep 2018 14:24:20 -0500 Subject: [PATCH 2/2] Add kubectx, kubens, kube_ps1 kubectx and kubens are added as kctx and kns (to avoid clashing with completion for kubectl). Their completion is added too (so you can do 'kns kube-sy[TAB]' to switch to kube-system). kube_ps1 is added and enabled. The default prompt for the docker user now shows the current context and namespace.
--- prepare-vms/lib/commands.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/prepare-vms/lib/commands.sh b/prepare-vms/lib/commands.sh index 23ecfcfc..bac7f9ef 100644 --- a/prepare-vms/lib/commands.sh +++ b/prepare-vms/lib/commands.sh @@ -143,6 +143,23 @@ _cmd_kube() { sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443 fi" + # Install kubectx and kubens + pssh " + [ -d kubectx ] || git clone https://github.com/ahmetb/kubectx && + sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx && + sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns && + sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d && + [ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 && + sudo -u docker sed s/docker-prompt/kube_ps1/ /home/docker/.bashrc && + sudo -u docker tee -a /home/docker/.bashrc <