mirror of
https://github.com/stefanprodan/podinfo.git
synced 2026-04-07 03:26:54 +00:00
Compare commits
337 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
070535aa6d | ||
|
|
33fa95b452 | ||
|
|
b0d5fe73c6 | ||
|
|
f8078c6d9c | ||
|
|
c91289539f | ||
|
|
9e4271c32f | ||
|
|
e107323152 | ||
|
|
a5cb68c49f | ||
|
|
745022dcb3 | ||
|
|
7c88a31f52 | ||
|
|
89cd76ceca | ||
|
|
344813ab4c | ||
|
|
ad9cc3d42c | ||
|
|
71bc8b76ec | ||
|
|
67fc5cf534 | ||
|
|
46d18e955a | ||
|
|
1e40803bfb | ||
|
|
13f116b808 | ||
|
|
ccdea85f0a | ||
|
|
095bfb37f3 | ||
|
|
9e6a339fef | ||
|
|
a1b2bc79b8 | ||
|
|
37522319db | ||
|
|
e4a96f95c8 | ||
|
|
a888f4d135 | ||
|
|
d2bfab977c | ||
|
|
e4b5093912 | ||
|
|
a9565e1b27 | ||
|
|
40bbbc12b3 | ||
|
|
f349616a3f | ||
|
|
3cb08bab98 | ||
|
|
fd63e6ef74 | ||
|
|
d9e61b07cc | ||
|
|
7ddb9ea518 | ||
|
|
41ec5f60fb | ||
|
|
8613002a6f | ||
|
|
6f98421d8b | ||
|
|
9c36d89171 | ||
|
|
53c814115d | ||
|
|
7a2877f879 | ||
|
|
2a8de2bbca | ||
|
|
be7f93c668 | ||
|
|
b33567f025 | ||
|
|
2451f71581 | ||
|
|
50d9e3e07c | ||
|
|
7601e8e35b | ||
|
|
b541f90a3d | ||
|
|
275c8ffaf6 | ||
|
|
44fd560f3b | ||
|
|
c86e56a991 | ||
|
|
f04a6ec789 | ||
|
|
a68db07056 | ||
|
|
b15f8fe4ab | ||
|
|
292d4e77fd | ||
|
|
dde1a73968 | ||
|
|
52ca29260c | ||
|
|
45da3ab58d | ||
|
|
e1972b7afc | ||
|
|
a120ff1cc4 | ||
|
|
545edabf5c | ||
|
|
2589634047 | ||
|
|
d439dc2edf | ||
|
|
62546e0a69 | ||
|
|
80f46daa4a | ||
|
|
b8bc6f76e4 | ||
|
|
4ed0f6030e | ||
|
|
9653561ccf | ||
|
|
3a2e56dc7d | ||
|
|
955410a94c | ||
|
|
021401fd43 | ||
|
|
a9f6446865 | ||
|
|
803456eddd | ||
|
|
2a7425f6e2 | ||
|
|
b51d46649a | ||
|
|
34a2b2a571 | ||
|
|
d89cf7db10 | ||
|
|
80d5183749 | ||
|
|
2651dae114 | ||
|
|
5a68383db6 | ||
|
|
e4c3b94f0a | ||
|
|
c1689ad24a | ||
|
|
a3ae18b304 | ||
|
|
a7be119f20 | ||
|
|
c59466012f | ||
|
|
cd7ad53eae | ||
|
|
25fff58ba5 | ||
|
|
d1001f0eb5 | ||
|
|
0fe07bef97 | ||
|
|
a044694622 | ||
|
|
efe5de74c0 | ||
|
|
8aa52e8afd | ||
|
|
874791526a | ||
|
|
037eaa9d63 | ||
|
|
5920bfdbe3 | ||
|
|
d298670b09 | ||
|
|
5a05ae2f98 | ||
|
|
0fe3652b5b | ||
|
|
c60a8f7fee | ||
|
|
0b7676a2db | ||
|
|
0ed8c210c1 | ||
|
|
f1fe3f3d2b | ||
|
|
52b5958a86 | ||
|
|
90af761766 | ||
|
|
3dc9ac574e | ||
|
|
6c8a85a5ab | ||
|
|
c9dc78f29c | ||
|
|
6a9b0253ac | ||
|
|
198211e20b | ||
|
|
e1ca9e227d | ||
|
|
4fc593f42c | ||
|
|
18af1ea3a6 | ||
|
|
2e9917a6b9 | ||
|
|
bf00d07b17 | ||
|
|
aab8e464e8 | ||
|
|
1475a2da00 | ||
|
|
44f588dd4c | ||
|
|
951d82abb9 | ||
|
|
3301f6f8d4 | ||
|
|
0339d3beb0 | ||
|
|
d1b77c97b8 | ||
|
|
bfa3aaf9ac | ||
|
|
43df2d19c6 | ||
|
|
7181351c89 | ||
|
|
1c3bf10de2 | ||
|
|
c28c11d4a5 | ||
|
|
5c2f9a19d6 | ||
|
|
7d5200a78a | ||
|
|
66b8948473 | ||
|
|
db04ce117b | ||
|
|
5142c39a8e | ||
|
|
001486ac0a | ||
|
|
ed553135b2 | ||
|
|
c21c24b2fd | ||
|
|
4dbbfa9239 | ||
|
|
bc8ff9b412 | ||
|
|
cdf9b06b86 | ||
|
|
431ab9e19e | ||
|
|
1273d3745e | ||
|
|
caa49b96aa | ||
|
|
319e444ddf | ||
|
|
0529fff9aa | ||
|
|
0fc239aaca | ||
|
|
f0b19b63e9 | ||
|
|
d10ba4ac43 | ||
|
|
7a2dca6798 | ||
|
|
62ccb1b67e | ||
|
|
579284c775 | ||
|
|
a4948e16dd | ||
|
|
995dcb5042 | ||
|
|
cbf1d671df | ||
|
|
f6987a0a09 | ||
|
|
ea93f3ed9f | ||
|
|
2fc253a7c7 | ||
|
|
c83e19a217 | ||
|
|
a9a1252a22 | ||
|
|
046a9a4852 | ||
|
|
4d78abdad8 | ||
|
|
f8b32fa130 | ||
|
|
a30fb535de | ||
|
|
8d662334a2 | ||
|
|
4ed9271783 | ||
|
|
97157694be | ||
|
|
bf92728234 | ||
|
|
bd31f8b23e | ||
|
|
f7c8061ac0 | ||
|
|
943f4e26ab | ||
|
|
f44909ef77 | ||
|
|
1af24bd3cd | ||
|
|
14ef95dac6 | ||
|
|
08a26cef24 | ||
|
|
8013c0bed0 | ||
|
|
6aa4303e08 | ||
|
|
f34fbacf13 | ||
|
|
b7701f6ae7 | ||
|
|
d3208cd8ac | ||
|
|
7d4c89d965 | ||
|
|
3b5ac61680 | ||
|
|
e8e2ac2b34 | ||
|
|
ef571a9b1b | ||
|
|
3d9cabcea4 | ||
|
|
ae4120a24e | ||
|
|
97d36bd8bb | ||
|
|
18a22d1b94 | ||
|
|
083de34465 | ||
|
|
64b85dc30d | ||
|
|
fed964e223 | ||
|
|
efb6a76242 | ||
|
|
fb199b72a1 | ||
|
|
ce117e1706 | ||
|
|
23e67f9923 | ||
|
|
30b030a685 | ||
|
|
0fe4a7a3a9 | ||
|
|
982063ab9b | ||
|
|
c3256bd18f | ||
|
|
d947fc5b2c | ||
|
|
dc6d64137d | ||
|
|
f3c1ee7dbc | ||
|
|
6b6dd86fea | ||
|
|
02e5f233d0 | ||
|
|
b89f46ac04 | ||
|
|
59cd692141 | ||
|
|
bcd61428d1 | ||
|
|
f8ec9c0947 | ||
|
|
6c98fbf1f4 | ||
|
|
54f6d9f74d | ||
|
|
1d35304d9d | ||
|
|
457a56f71a | ||
|
|
fbcab6cf56 | ||
|
|
0126282669 | ||
|
|
ff1fb39f43 | ||
|
|
84f0e1c9e2 | ||
|
|
3eb4cc90f9 | ||
|
|
b6c3d36bde | ||
|
|
a8a85e6aae | ||
|
|
79b2d784bf | ||
|
|
bfd35f6cc0 | ||
|
|
f1775ba090 | ||
|
|
7a2d59de8e | ||
|
|
8191871761 | ||
|
|
36bb719b1c | ||
|
|
ecd204b15e | ||
|
|
979fd669df | ||
|
|
feac686e60 | ||
|
|
d362dc5f81 | ||
|
|
593ccaa0cd | ||
|
|
0f098cf0f1 | ||
|
|
2ddbc03371 | ||
|
|
f2d95bbf80 | ||
|
|
7d18ec68b3 | ||
|
|
774d34c1dd | ||
|
|
f13d006993 | ||
|
|
aeeb146c2a | ||
|
|
11bd74eff2 | ||
|
|
af6d11fd33 | ||
|
|
49746fe2fb | ||
|
|
da24d729bb | ||
|
|
449fcca3a9 | ||
|
|
2b0a742974 | ||
|
|
153f4dce45 | ||
|
|
4c8d11cc3e | ||
|
|
08415ce2ce | ||
|
|
d26b7a96d9 | ||
|
|
3c897b8bd7 | ||
|
|
511ab87a18 | ||
|
|
21922197b5 | ||
|
|
7ea943525f | ||
|
|
57ff4465cd | ||
|
|
a86ef1fdb6 | ||
|
|
ddf1b80e1b | ||
|
|
896aceb240 | ||
|
|
7996f76e71 | ||
|
|
8b04a8f502 | ||
|
|
8a6a4e8901 | ||
|
|
cf8531c224 | ||
|
|
d1574a6601 | ||
|
|
75d93e0c54 | ||
|
|
7622dfb74f | ||
|
|
85a26ed71e | ||
|
|
81b22f08f8 | ||
|
|
7d9e3afde7 | ||
|
|
3d2028a124 | ||
|
|
1b56648f5b | ||
|
|
3a704215a4 | ||
|
|
25aaeff13c | ||
|
|
3b93a3445e | ||
|
|
a6cc3d2ef9 | ||
|
|
718d8ba4e0 | ||
|
|
24ceb25930 | ||
|
|
fc8dfc7678 | ||
|
|
8e656fdfd0 | ||
|
|
a945842e9b | ||
|
|
09a743f5c2 | ||
|
|
c44a58602e | ||
|
|
2ee11bf6b2 | ||
|
|
70b0e92555 | ||
|
|
7a78c93a49 | ||
|
|
be915d44cc | ||
|
|
82f2f9ecf9 | ||
|
|
035f78edc1 | ||
|
|
91c61d4fa5 | ||
|
|
e673dae20d | ||
|
|
adfff4a923 | ||
|
|
4db9d5a1ed | ||
|
|
92114c05c9 | ||
|
|
62fa684440 | ||
|
|
2aba7a3ed2 | ||
|
|
fda68019ea | ||
|
|
39dde13700 | ||
|
|
2485a10189 | ||
|
|
6c3569e131 | ||
|
|
9b3a033845 | ||
|
|
f02ebc267a | ||
|
|
01631a0a43 | ||
|
|
a1e5cb77fd | ||
|
|
cdc6765b51 | ||
|
|
ff9cf93b14 | ||
|
|
5665149191 | ||
|
|
5a1f009200 | ||
|
|
b6be95ee77 | ||
|
|
ad22fdb933 | ||
|
|
9b287dbf5c | ||
|
|
e81277f217 | ||
|
|
e24c83525a | ||
|
|
65d03a557b | ||
|
|
e93d0682fb | ||
|
|
a1bedc8c43 | ||
|
|
07d3192afb | ||
|
|
ee10c878a0 | ||
|
|
db9bf53e4f | ||
|
|
53d2609d8f | ||
|
|
b34653912d | ||
|
|
1a2029f74d | ||
|
|
68babf42e1 | ||
|
|
1330decdaa | ||
|
|
1682f79478 | ||
|
|
93dee060dc | ||
|
|
797a4200dd | ||
|
|
0c84164b65 | ||
|
|
b104769f20 | ||
|
|
4acfdba296 | ||
|
|
b5719fea3f | ||
|
|
00106faf8d | ||
|
|
88f417ee1c | ||
|
|
94441ef933 | ||
|
|
b1871f827b | ||
|
|
753799812a | ||
|
|
6aa5cbbaee | ||
|
|
4efde133e5 | ||
|
|
60c0601128 | ||
|
|
d4882b4212 | ||
|
|
e4c765160a | ||
|
|
130e1dac8e | ||
|
|
510864654f | ||
|
|
310643b0df | ||
|
|
6de537a315 | ||
|
|
5d992a92bb | ||
|
|
0aade8c049 |
5
.circleci/config.yml
Normal file
5
.circleci/config.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
version: 2
|
||||
jobs:
|
||||
build:
|
||||
branches:
|
||||
ignore: gh-pages
|
||||
17
.gitignore
vendored
17
.gitignore
vendored
@@ -1,17 +0,0 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
|
||||
.glide/
|
||||
.idea/
|
||||
release/
|
||||
build/
|
||||
43
.travis.yml
43
.travis.yml
@@ -1,43 +0,0 @@
|
||||
sudo: required
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.9.x
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- docker-ce
|
||||
|
||||
before_install:
|
||||
- make dep
|
||||
# - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
# - mkdir -p .bin; mv ./kubectl .bin/kubectl && chmod +x .bin/kubectl
|
||||
# - export PATH="$TRAVIS_BUILD_DIR/.bin:$PATH"
|
||||
# - wget https://cdn.rawgit.com/Mirantis/kubeadm-dind-cluster/master/fixed/dind-cluster-v1.8.sh && chmod +x dind-cluster-v1.8.sh && ./dind-cluster-v1.8.sh up
|
||||
# - export PATH="$HOME/.kubeadm-dind-cluster:$PATH"
|
||||
|
||||
script:
|
||||
- make test
|
||||
- make build docker-build
|
||||
# - kubectl get nodes
|
||||
# - kubectl run podinfo --image=podinfo:latest --port=9898
|
||||
# - sleep 5
|
||||
# - kubectl get pods
|
||||
|
||||
after_success:
|
||||
- if [ -z "$DOCKER_USER" ]; then
|
||||
echo "PR build, skipping Docker Hub push";
|
||||
else
|
||||
docker login -u $DOCKER_USER -p $DOCKER_PASS;
|
||||
make docker-push;
|
||||
fi
|
||||
- if [ -z "$QUAY_USER" ]; then
|
||||
echo "PR build, skipping Quay push";
|
||||
else
|
||||
docker login -u $QUAY_USER -p $QUAY_PASS quay.io;
|
||||
make quay-push;
|
||||
fi
|
||||
16
Dockerfile
16
Dockerfile
@@ -1,16 +0,0 @@
|
||||
FROM alpine:3.7
|
||||
|
||||
RUN addgroup -S app \
|
||||
&& adduser -S -g app app \
|
||||
&& apk --no-cache add \
|
||||
curl openssl netcat-openbsd
|
||||
|
||||
WORKDIR /home/app
|
||||
|
||||
ADD podinfo .
|
||||
|
||||
RUN chown -R app:app ./
|
||||
|
||||
USER app
|
||||
|
||||
CMD ["./podinfo"]
|
||||
@@ -1,5 +0,0 @@
|
||||
FROM alpine:3.7
|
||||
|
||||
ADD podinfo /podinfo
|
||||
|
||||
CMD ["./podinfo"]
|
||||
@@ -1,32 +0,0 @@
|
||||
FROM golang:1.9 as builder
|
||||
|
||||
RUN mkdir -p /go/src/github.com/stefanprodan/k8s-podinfo/
|
||||
|
||||
WORKDIR /go/src/github.com/stefanprodan/k8s-podinfo
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN go test $(go list ./... | grep -v integration | grep -v /vendor/ | grep -v /template/) -cover
|
||||
|
||||
RUN gofmt -l -d $(find . -type f -name '*.go' -not -path "./vendor/*") && \
|
||||
GIT_COMMIT=$(git rev-list -1 HEAD) && \
|
||||
CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
|
||||
-X github.com/stefanprodan/k8s-podinfo/pkg/version.GITCOMMIT=${GIT_COMMIT}" \
|
||||
-a -installsuffix cgo -o podinfo ./cmd/podinfo
|
||||
|
||||
FROM alpine:3.7
|
||||
|
||||
RUN addgroup -S app \
|
||||
&& adduser -S -g app app \
|
||||
&& apk --no-cache add \
|
||||
curl openssl netcat-openbsd
|
||||
|
||||
WORKDIR /home/app
|
||||
|
||||
COPY --from=builder /go/src/github.com/stefanprodan/k8s-podinfo/podinfo .
|
||||
|
||||
RUN chown -R app:app ./
|
||||
|
||||
USER app
|
||||
|
||||
CMD ["./podinfo"]
|
||||
85
Gopkg.lock
generated
85
Gopkg.lock
generated
@@ -1,85 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
]
|
||||
revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/rs/zerolog"
|
||||
packages = [
|
||||
".",
|
||||
"internal/json",
|
||||
"log"
|
||||
]
|
||||
revision = "56a970de510213e50dbaa39ad73ac07c9ec75606"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
|
||||
version = "v2.1.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "4f1e9200a330a22000fc47075b59e68e57c94bcb3d9f444f3ce85cab77e07fde"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
20
Gopkg.toml
20
Gopkg.toml
@@ -1,20 +0,0 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/errors"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/rs/zerolog"
|
||||
version = "1.5.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
version = "2.1.1"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
21
LICENSE
21
LICENSE
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2018 Stefan Prodan
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
113
Makefile
113
Makefile
@@ -1,113 +0,0 @@
|
||||
# Makefile for releasing Alpine multi-arch Docker images
|
||||
#
|
||||
# The release version is controlled from pkg/version
|
||||
|
||||
EMPTY:=
|
||||
SPACE:=$(EMPTY) $(EMPTY)
|
||||
COMMA:=$(EMPTY),$(EMPTY)
|
||||
NAME:=podinfo
|
||||
DOCKER_REPOSITORY:=stefanprodan
|
||||
DOCKER_IMAGE_NAME:=$(DOCKER_REPOSITORY)/$(NAME)
|
||||
GITREPO:=github.com/stefanprodan/k8s-podinfo
|
||||
GITCOMMIT:=$(shell git describe --dirty --always)
|
||||
VERSION:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"')
|
||||
LINUX_ARCH:=arm arm64 ppc64le s390x amd64
|
||||
PLATFORMS:=$(subst $(SPACE),$(COMMA),$(foreach arch,$(LINUX_ARCH),linux/$(arch)))
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
@echo Cleaning old builds
|
||||
@rm -rf build && mkdir build
|
||||
@echo Building: linux/$(LINUX_ARCH) $(VERSION) ;\
|
||||
for arch in $(LINUX_ARCH); do \
|
||||
mkdir -p build/linux/$$arch && CGO_ENABLED=0 GOOS=linux GOARCH=$$arch go build -ldflags="-s -w -X $(GITREPO)/pkg/version.GITCOMMIT=$(GITCOMMIT)" -o build/linux/$$arch/$(NAME) ./cmd/$(NAME) ;\
|
||||
done
|
||||
|
||||
.PHONY: tar
|
||||
tar: build
|
||||
@echo Cleaning old releases
|
||||
@rm -rf release && mkdir release
|
||||
for arch in $(LINUX_ARCH); do \
|
||||
tar -zcf release/$(NAME)_$(VERSION)_linux_$$arch.tgz -C build/linux/$$arch $(NAME) ;\
|
||||
done
|
||||
|
||||
.PHONY: docker-build
|
||||
docker-build: tar
|
||||
# Steps:
|
||||
# 1. Copy appropriate podinfo binary to build/docker/linux/<arch>
|
||||
# 2. Copy Dockerfile to build/docker/linux/<arch>
|
||||
# 3. Replace base image from alpine:latest to <arch>/alpine:latest
|
||||
# 4. Comment RUN in Dockerfile
|
||||
# <arch>:
|
||||
# arm: arm32v6
|
||||
# arm64: arm64v8
|
||||
rm -rf build/docker
|
||||
@for arch in $(LINUX_ARCH); do \
|
||||
mkdir -p build/docker/linux/$$arch ;\
|
||||
tar -xzf release/$(NAME)_$(VERSION)_linux_$$arch.tgz -C build/docker/linux/$$arch ;\
|
||||
if [ $$arch == amd64 ]; then \
|
||||
cp Dockerfile build/docker/linux/$$arch ;\
|
||||
cp Dockerfile build/docker/linux/$$arch/Dockerfile.in ;\
|
||||
else \
|
||||
cp Dockerfile.build build/docker/linux/$$arch/Dockerfile ;\
|
||||
cp Dockerfile.build build/docker/linux/$$arch/Dockerfile.in ;\
|
||||
case $$arch in \
|
||||
arm) \
|
||||
BASEIMAGE=arm32v6 ;\
|
||||
;; \
|
||||
arm64) \
|
||||
BASEIMAGE=arm64v8 ;\
|
||||
;; \
|
||||
*) \
|
||||
BASEIMAGE=$$arch ;\
|
||||
;; \
|
||||
esac ;\
|
||||
sed -e "s/alpine:latest/$$BASEIMAGE\\/alpine:latest/" -e "s/^\\s*RUN/#RUN/" build/docker/linux/$$arch/Dockerfile.in > build/docker/linux/$$arch/Dockerfile ;\
|
||||
fi ;\
|
||||
docker build -t $(NAME) build/docker/linux/$$arch ;\
|
||||
docker tag $(NAME) $(DOCKER_IMAGE_NAME):$(NAME)-$$arch ;\
|
||||
done
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push:
|
||||
@echo Pushing: $(VERSION) to $(DOCKER_IMAGE_NAME)
|
||||
for arch in $(LINUX_ARCH); do \
|
||||
docker push $(DOCKER_IMAGE_NAME):$(NAME)-$$arch ;\
|
||||
done
|
||||
manifest-tool push from-args --platforms $(PLATFORMS) --template $(DOCKER_IMAGE_NAME):podinfo-ARCH --target $(DOCKER_IMAGE_NAME):$(VERSION)
|
||||
manifest-tool push from-args --platforms $(PLATFORMS) --template $(DOCKER_IMAGE_NAME):podinfo-ARCH --target $(DOCKER_IMAGE_NAME):latest
|
||||
|
||||
.PHONY: quay-push
|
||||
quay-push:
|
||||
@echo Pushing: $(VERSION) to quay.io/$(DOCKER_IMAGE_NAME):$(VERSION)
|
||||
@cd build/docker/linux/amd64/ ; docker build -t quay.io/$(DOCKER_IMAGE_NAME):$(VERSION) . ; docker push quay.io/$(DOCKER_IMAGE_NAME):$(VERSION)
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf release
|
||||
rm -rf build
|
||||
|
||||
.PHONY: gcr-build
|
||||
gcr-build:
|
||||
docker build -t gcr.io/$(DOCKER_IMAGE_NAME):$(VERSION) -f Dockerfile.ci .
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
cd pkg/server ; go test -v -race ./...
|
||||
|
||||
.PHONY: dep
|
||||
dep:
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
go get -u github.com/estesp/manifest-tool
|
||||
|
||||
.PHONY: charts
|
||||
charts:
|
||||
cd charts/ && helm package podinfo/
|
||||
mv charts/podinfo-0.1.0.tgz docs/
|
||||
cd charts/ && helm package ambassador/
|
||||
mv charts/ambassador-0.1.0.tgz docs/
|
||||
cd charts/ && helm package grafana/
|
||||
mv charts/grafana-0.1.0.tgz docs/
|
||||
cd charts/ && helm package ngrok/
|
||||
mv charts/ngrok-0.1.0.tgz docs/
|
||||
helm repo index docs --url https://stefanprodan.github.io/k8s-podinfo --merge ./docs/index.yaml
|
||||
43
README.md
43
README.md
@@ -1,43 +0,0 @@
|
||||
# k8s-podinfo
|
||||
|
||||
Podinfo is a tiny web application made with Go
|
||||
that showcases best practices of running microservices in Kubernetes.
|
||||
|
||||
Specifications:
|
||||
|
||||
* Multi-arch build and release automation (Make/TravisCI)
|
||||
* Multi-platform Docker image (amd64/arm/arm64/ppc64le/s390x)
|
||||
* Health checks (readiness and liveness)
|
||||
* Graceful shutdown on interrupt signals
|
||||
* Prometheus instrumentation (RED metrics)
|
||||
* Dependency management with golang/dep
|
||||
* Structured logging with zerolog
|
||||
* Error handling with pkg/errors
|
||||
* Helm chart
|
||||
|
||||
Web API:
|
||||
|
||||
* `GET /` prints runtime information, environment variables, labels and annotations
|
||||
* `GET /version` prints podinfo version and git commit hash
|
||||
* `GET /metrics` http requests duration and Go runtime metrics
|
||||
* `GET /healthz` used by Kubernetes liveness probe
|
||||
* `GET /readyz` used by Kubernetes readiness probe
|
||||
* `POST /readyz/enable` signals the Kubernetes LB that this instance is ready to receive traffic
|
||||
* `POST /readyz/disable` signals the Kubernetes LB to stop sending requests to this instance
|
||||
* `GET /error` returns code 500 and logs the error
|
||||
* `GET /panic` crashes the process with exit code 255
|
||||
* `POST /echo` echos the posted content, logs the SHA1 hash of the content
|
||||
* `GET /echoheaders` prints the request HTTP headers
|
||||
* `POST /job` long running job, json body: `{"wait":2}`
|
||||
* `POST /write` writes the posted content to disk at /data/hash and returns the SHA1 hash of the content
|
||||
* `POST /read` receives a SHA1 hash and returns the content of the file /data/hash if exists
|
||||
* `POST /backend` forwards the call to the backend service on `http://backend-podinfo:9898/echo`
|
||||
|
||||
### Guides
|
||||
|
||||
* [Deploy and upgrade with Helm](docs/1-deploy.md)
|
||||
* [Horizontal Pod Auto-scaling](docs/2-autoscaling.md)
|
||||
* [Monitoring and alerting with Prometheus](docs/3-monitoring.md)
|
||||
* [StatefulSets with local persistent volumes](docs/4-statefulsets.md)
|
||||
* [Canary Deployments and A/B Testing](docs/5-canary.md)
|
||||
* [Expose Kubernetes services over HTTPS with Ngrok](docs/6-ngrok.md)
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,11 +0,0 @@
|
||||
apiVersion: v1
|
||||
appVersion: "0.29.0"
|
||||
description: A Helm chart for Datawire Ambassador
|
||||
name: ambassador
|
||||
version: 0.1.0
|
||||
sources:
|
||||
- https://github.com/datawire/ambassador
|
||||
maintainers:
|
||||
- name: stefanprodan
|
||||
email: stefanprodan@users.noreply.github.com
|
||||
engine: gotpl
|
||||
@@ -1,63 +0,0 @@
|
||||
# Ambassador
|
||||
|
||||
Ambassador is an open source, Kubernetes-native [microservices API gateway](https://www.getambassador.io/about/microservices-api-gateways) built on the [Envoy Proxy](https://www.envoyproxy.io/).
|
||||
|
||||
## TL;DR;
|
||||
|
||||
```console
|
||||
$ helm install stable/ambassador
|
||||
```
|
||||
|
||||
## Introduction
|
||||
|
||||
This chart bootstraps an [Ambassador](https://www.getambassador.io) deployment on
|
||||
a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.7+
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
$ helm install --name my-release stable/ambassador
|
||||
```
|
||||
|
||||
The command deploys Ambassador API gateway on the Kubernetes cluster in the default configuration.
|
||||
The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```console
|
||||
$ helm delete --purge my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following tables lists the configurable parameters of the Ambassador chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ------------------------------- | ------------------------------------------ | ---------------------------------------------------------- |
|
||||
| `image` | Image | `quay.io/datawire/ambassador`
|
||||
| `imageTag` | Image tag | `0.29.0`
|
||||
| `imagePullPolicy` | Image pull policy | `IfNotPresent`
|
||||
| `replicaCount` | Number of Ambassador replicas | `1`
|
||||
| `resources` | CPU/memory resource requests/limits | None
|
||||
| `rbac.create` | If `true`, create and use RBAC resources | `true`
|
||||
| `serviceAccount.create` | If `true`, create a new service account | `true`
|
||||
| `serviceAccount.name` | Service account to be used | `ambassador`
|
||||
| `service.type` | Service type to be used | `LoadBalancer`
|
||||
| `adminService.create` | If `true`, create a service for Ambassador's admin UI | `true`
|
||||
| `adminService.type` | Ambassador's admin service type to be used | `ClusterIP`
|
||||
| `exporter.image` | Prometheus exporter image | `datawire/prom-statsd-exporter:0.6.0`
|
||||
| `timing.restart` | The minimum number of seconds between Envoy restarts | `15`
|
||||
| `timing.drain` | The number of seconds that the Envoy will wait for open connections to drain on a restart | `5`
|
||||
| `timing.shutdown` | The number of seconds that Ambassador will wait for the old Envoy to clean up and exit on a restart | `10`
|
||||
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "ambassador.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "ambassador.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "ambassador.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "ambassador.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
@@ -1,43 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "ambassador.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "ambassador.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "ambassador.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "ambassador.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "ambassador.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@@ -1,21 +0,0 @@
|
||||
{{- if .Values.adminService.create -}}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "ambassador.fullname" . }}-admin
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
chart: {{ template "ambassador.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.adminService.type }}
|
||||
ports:
|
||||
- port: 8877
|
||||
targetPort: admin
|
||||
protocol: TCP
|
||||
name: admin
|
||||
selector:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
@@ -1,78 +0,0 @@
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "ambassador.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
chart: {{ template "ambassador.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9102"
|
||||
spec:
|
||||
serviceAccountName: {{ template "ambassador.serviceAccountName" . }}
|
||||
containers:
|
||||
- name: statsd-sink
|
||||
image: "{{ .Values.exporter.image }}"
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 9102
|
||||
- name: ambassador
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
- name: https
|
||||
containerPort: 443
|
||||
- name: admin
|
||||
containerPort: 8877
|
||||
env:
|
||||
- name: AMBASSADOR_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: AMBASSADOR_RESTART_TIME
|
||||
value: {{ .Values.timing.restart | quote }}
|
||||
- name: AMBASSADOR_DRAIN_TIME
|
||||
value: {{ .Values.timing.drain | quote }}
|
||||
- name: AMBASSADOR_SHUTDOWN_TIME
|
||||
value: {{ .Values.timing.shutdown | quote }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /ambassador/v0/check_alive
|
||||
port: admin
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /ambassador/v0/check_ready
|
||||
port: admin
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 3
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,42 +0,0 @@
|
||||
{{- if .Values.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ template "ambassador.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
chart: {{ template "ambassador.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- services
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["create", "update", "patch", "get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- secrets
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "ambassador.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
chart: {{ template "ambassador.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "ambassador.fullname" . }}
|
||||
subjects:
|
||||
- name: {{ template "ambassador.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
kind: ServiceAccount
|
||||
{{- end -}}
|
||||
@@ -1,23 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "ambassador.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
chart: {{ template "ambassador.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
- port: 443
|
||||
targetPort: https
|
||||
protocol: TCP
|
||||
name: https
|
||||
selector:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -1,11 +0,0 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "ambassador.serviceAccountName" . }}
|
||||
labels:
|
||||
app: {{ template "ambassador.name" . }}
|
||||
chart: {{ template "ambassador.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
{{- end -}}
|
||||
@@ -1,56 +0,0 @@
|
||||
# Default values for ambassador.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: quay.io/datawire/ambassador
|
||||
tag: 0.29.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
type: LoadBalancer
|
||||
port: 80
|
||||
|
||||
adminService:
|
||||
create: true
|
||||
type: ClusterIP
|
||||
|
||||
rbac:
|
||||
# Specifies whether RBAC resources should be created
|
||||
create: true
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
|
||||
resources: {}
|
||||
# If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
exporter:
|
||||
image: datawire/prom-statsd-exporter:0.6.0
|
||||
|
||||
timing:
|
||||
# sets the minimum number of seconds between Envoy restarts
|
||||
restart: 15
|
||||
# sets the number of seconds that the Envoy will wait for open connections to drain on a restart
|
||||
drain: 5
|
||||
# sets the number of seconds that Ambassador will wait for the old Envoy to clean up and exit on a restart
|
||||
shutdown: 10
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,5 +0,0 @@
|
||||
apiVersion: v1
|
||||
appVersion: "1.0"
|
||||
description: A Helm chart for Kubernetes
|
||||
name: grafana
|
||||
version: 0.1.0
|
||||
@@ -1,65 +0,0 @@
|
||||
# Weave Cloud Grafana
|
||||
|
||||
Grafana v5 with Kubernetes dashboards and Prometheus and Weave Cloud data sources.
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
$ helm install stable/grafana --name my-release \
|
||||
--set service.type=NodePort \
|
||||
--set token=WEAVE-TOKEN \
|
||||
--set password=admin
|
||||
```
|
||||
|
||||
The command deploys Grafana on the Kubernetes cluster in the default namespace.
|
||||
The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```console
|
||||
$ helm delete --purge my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following tables lists the configurable parameters of the Grafana chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`image.repository` | Image repository | `grafana/grafana`
|
||||
`image.pullPolicy` | Image pull policy | `IfNotPresent`
|
||||
`image.tag` | Image tag | `5.0.1`
|
||||
`replicaCount` | desired number of pods | `1`
|
||||
`resources` | pod resources | `none`
|
||||
`tolerations` | List of node taints to tolerate | `[]`
|
||||
`affinity` | node/pod affinities | `node`
|
||||
`nodeSelector` | node labels for pod assignment | `{}`
|
||||
`service.type` | type of service | `LoadBalancer`
|
||||
`url` | Prometheus URL, used when Weave token is empty | `http://prometheus:9090`
|
||||
`token` | Weave Cloud token | `none`
|
||||
`user` | Grafana admin username | `admin`
|
||||
`password` | Grafana admin password | `none`
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```console
|
||||
$ helm install stable/grafana --name my-release \
|
||||
--set=token=WEAVE-TOKEN \
|
||||
--set password=admin
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
```console
|
||||
$ helm install stable/grafana --name my-release -f values.yaml
|
||||
```
|
||||
|
||||
> **Tip**: You can use the default [values.yaml](values.yaml)
|
||||
```
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,817 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"$$hashKey": "object:246",
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"iteration": 1521457664773,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"colorBackground": false,
|
||||
"colorValue": false,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"datasource": "prometheus",
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"format": "none",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
"minValue": 0,
|
||||
"show": false,
|
||||
"thresholdLabels": false,
|
||||
"thresholdMarkers": true
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 4,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 8,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"mappingType": 1,
|
||||
"mappingTypes": [
|
||||
{
|
||||
"name": "value to text",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"name": "range to text",
|
||||
"value": 2
|
||||
}
|
||||
],
|
||||
"maxDataPoints": 100,
|
||||
"nullPointMode": "connected",
|
||||
"nullText": null,
|
||||
"postfix": "cores",
|
||||
"postfixFontSize": "50%",
|
||||
"prefix": "",
|
||||
"prefixFontSize": "50%",
|
||||
"rangeMaps": [
|
||||
{
|
||||
"from": "null",
|
||||
"text": "N/A",
|
||||
"to": "null"
|
||||
}
|
||||
],
|
||||
"sparkline": {
|
||||
"fillColor": "rgba(31, 118, 189, 0.18)",
|
||||
"full": false,
|
||||
"lineColor": "rgb(31, 120, 193)",
|
||||
"show": true
|
||||
},
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[3m])) ",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A",
|
||||
"step": 600
|
||||
}
|
||||
],
|
||||
"thresholds": "",
|
||||
"title": "CPU",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "110%",
|
||||
"valueMaps": [
|
||||
{
|
||||
"op": "=",
|
||||
"text": "N/A",
|
||||
"value": "null"
|
||||
}
|
||||
],
|
||||
"valueName": "avg"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"colorBackground": false,
|
||||
"colorValue": false,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"datasource": "prometheus",
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"format": "none",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
"minValue": 0,
|
||||
"show": false,
|
||||
"thresholdLabels": false,
|
||||
"thresholdMarkers": true
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 4,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 0
|
||||
},
|
||||
"id": 9,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"mappingType": 1,
|
||||
"mappingTypes": [
|
||||
{
|
||||
"name": "value to text",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"name": "range to text",
|
||||
"value": 2
|
||||
}
|
||||
],
|
||||
"maxDataPoints": 100,
|
||||
"nullPointMode": "connected",
|
||||
"nullText": null,
|
||||
"postfix": "GB",
|
||||
"postfixFontSize": "50%",
|
||||
"prefix": "",
|
||||
"prefixFontSize": "80%",
|
||||
"rangeMaps": [
|
||||
{
|
||||
"from": "null",
|
||||
"text": "N/A",
|
||||
"to": "null"
|
||||
}
|
||||
],
|
||||
"sparkline": {
|
||||
"fillColor": "rgba(31, 118, 189, 0.18)",
|
||||
"full": false,
|
||||
"lineColor": "rgb(31, 120, 193)",
|
||||
"show": true
|
||||
},
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(container_memory_usage_bytes{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}) / 1024^3",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A",
|
||||
"step": 600
|
||||
}
|
||||
],
|
||||
"thresholds": "",
|
||||
"title": "Memory",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "110%",
|
||||
"valueMaps": [
|
||||
{
|
||||
"op": "=",
|
||||
"text": "N/A",
|
||||
"value": "null"
|
||||
}
|
||||
],
|
||||
"valueName": "avg"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"colorBackground": false,
|
||||
"colorValue": false,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"datasource": "prometheus",
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"format": "Bps",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
"minValue": 0,
|
||||
"show": false,
|
||||
"thresholdLabels": false,
|
||||
"thresholdMarkers": false
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 4,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 0
|
||||
},
|
||||
"id": 7,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"mappingType": 1,
|
||||
"mappingTypes": [
|
||||
{
|
||||
"name": "value to text",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"name": "range to text",
|
||||
"value": 2
|
||||
}
|
||||
],
|
||||
"maxDataPoints": 100,
|
||||
"nullPointMode": "connected",
|
||||
"nullText": null,
|
||||
"postfix": "",
|
||||
"postfixFontSize": "50%",
|
||||
"prefix": "",
|
||||
"prefixFontSize": "50%",
|
||||
"rangeMaps": [
|
||||
{
|
||||
"from": "null",
|
||||
"text": "N/A",
|
||||
"to": "null"
|
||||
}
|
||||
],
|
||||
"sparkline": {
|
||||
"fillColor": "rgba(31, 118, 189, 0.18)",
|
||||
"full": false,
|
||||
"lineColor": "rgb(31, 120, 193)",
|
||||
"show": true
|
||||
},
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(container_network_transmit_bytes_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[3m])) ",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A",
|
||||
"step": 600
|
||||
}
|
||||
],
|
||||
"thresholds": "",
|
||||
"title": "Network",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "80%",
|
||||
"valueMaps": [
|
||||
{
|
||||
"op": "=",
|
||||
"text": "N/A",
|
||||
"value": "null"
|
||||
}
|
||||
],
|
||||
"valueName": "avg"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "prometheus",
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"fill": 1,
|
||||
"grid": {},
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 20,
|
||||
"x": 0,
|
||||
"y": 4
|
||||
},
|
||||
"id": 1,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"hideZero": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "connected",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "max(kube_deployment_status_replicas{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "current replicas",
|
||||
"refId": "A",
|
||||
"step": 30
|
||||
},
|
||||
{
|
||||
"expr": "min(kube_deployment_status_replicas_available{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "available",
|
||||
"refId": "B",
|
||||
"step": 30
|
||||
},
|
||||
{
|
||||
"expr": "max(kube_deployment_status_replicas_unavailable{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "unavailable",
|
||||
"refId": "C",
|
||||
"step": 30
|
||||
},
|
||||
{
|
||||
"expr": "min(kube_deployment_status_replicas_updated{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "updated",
|
||||
"refId": "D",
|
||||
"step": 30
|
||||
},
|
||||
{
|
||||
"expr": "max(kube_deployment_spec_replicas{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "desired",
|
||||
"refId": "E",
|
||||
"step": 30
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Replicas",
|
||||
"tooltip": {
|
||||
"msResolution": true,
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "cumulative"
|
||||
},
|
||||
"transparent": false,
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "none",
|
||||
"label": "",
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"colorBackground": false,
|
||||
"colorValue": false,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"datasource": "prometheus",
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"format": "percent",
|
||||
"gauge": {
|
||||
"maxValue": 100,
|
||||
"minValue": 0,
|
||||
"show": true,
|
||||
"thresholdLabels": false,
|
||||
"thresholdMarkers": true
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 4,
|
||||
"x": 20,
|
||||
"y": 4
|
||||
},
|
||||
"id": 11,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"mappingType": 1,
|
||||
"mappingTypes": [
|
||||
{
|
||||
"name": "value to text",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"name": "range to text",
|
||||
"value": 2
|
||||
}
|
||||
],
|
||||
"maxDataPoints": 100,
|
||||
"nullPointMode": "connected",
|
||||
"nullText": null,
|
||||
"postfix": "",
|
||||
"postfixFontSize": "50%",
|
||||
"prefix": "",
|
||||
"prefixFontSize": "50%",
|
||||
"rangeMaps": [
|
||||
{
|
||||
"from": "null",
|
||||
"text": "N/A",
|
||||
"to": "null"
|
||||
}
|
||||
],
|
||||
"sparkline": {
|
||||
"fillColor": "rgba(31, 118, 189, 0.18)",
|
||||
"full": false,
|
||||
"lineColor": "rgb(31, 120, 193)",
|
||||
"show": false
|
||||
},
|
||||
"tableColumn": "",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "(min(kube_deployment_status_replicas_available{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod))\n/\n(min(kube_deployment_spec_replicas{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)) * 100\n",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "",
|
||||
"refId": "A",
|
||||
"step": 600
|
||||
}
|
||||
],
|
||||
"thresholds": "50,80",
|
||||
"title": "Availability",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "80%",
|
||||
"valueMaps": [
|
||||
{
|
||||
"op": "=",
|
||||
"text": "N/A",
|
||||
"value": "null"
|
||||
}
|
||||
],
|
||||
"valueName": "avg"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 10
|
||||
},
|
||||
"id": 10,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"sort": "current",
|
||||
"sortDesc": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sort_desc(\n\nsum by (pod_name) (rate(container_cpu_usage_seconds_total{image!=\"\",container_name!=\"POD\",namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[1m])) \n\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{ pod_name }}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Pods CPU",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "none",
|
||||
"label": "cores",
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 13,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"sort": "current",
|
||||
"sortDesc": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sort_desc(\n\nsum by (pod_name) (container_memory_working_set_bytes{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}) \n\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{ pod_name }}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Pods Memory",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "bytes",
|
||||
"label": "",
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 22
|
||||
},
|
||||
"id": 12,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"sort": "current",
|
||||
"sortDesc": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sort_desc(\n\nsum by (pod_name) (rate (container_network_receive_bytes_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[1m])) \n+\nsum by (pod_name) (rate (container_network_transmit_bytes_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[1m]))\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{ pod_name }}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Pods Network I/O",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "Bps",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"refresh": "30s",
|
||||
"schemaVersion": 16,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"text": "default",
|
||||
"value": "default"
|
||||
},
|
||||
"datasource": "prometheus",
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "Namespace",
|
||||
"multi": false,
|
||||
"name": "deployment_namespace",
|
||||
"options": [],
|
||||
"query": "label_values(kube_deployment_metadata_generation, namespace)",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"sort": 0,
|
||||
"tagValuesQuery": null,
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
},
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {
|
||||
"text": "ga-podinfo",
|
||||
"value": "ga-podinfo"
|
||||
},
|
||||
"datasource": "prometheus",
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "Deployment",
|
||||
"multi": false,
|
||||
"name": "deployment_name",
|
||||
"options": [],
|
||||
"query": "label_values(kube_deployment_metadata_generation{namespace=\"$deployment_namespace\"}, deployment)",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"sort": 0,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "deployment",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-5m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "browser",
|
||||
"title": "Deployments",
|
||||
"uid": "sgRyigkik",
|
||||
"version": 2
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,15 +0,0 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "grafana.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
@@ -1,32 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "grafana.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "grafana.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "grafana.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -1,6 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}-dashboards
|
||||
data:
|
||||
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
|
||||
@@ -1,32 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}-datasources
|
||||
data:
|
||||
datasources.yaml: |-
|
||||
apiVersion: 1
|
||||
|
||||
deleteDatasources:
|
||||
- name: prometheus
|
||||
{{- if .Values.token }}
|
||||
datasources:
|
||||
- name: prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: https://cloud.weave.works/api/prom
|
||||
isDefault: true
|
||||
editable: true
|
||||
version: 1
|
||||
basicAuth: true
|
||||
basicAuthUser: weave
|
||||
basicAuthPassword: {{ .Values.token }}
|
||||
{{- else }}
|
||||
datasources:
|
||||
- name: prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: {{ .Values.url }}
|
||||
isDefault: true
|
||||
editable: true
|
||||
version: 1
|
||||
{{- end }}
|
||||
@@ -1,81 +0,0 @@
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "grafana.name" . }}
|
||||
chart: {{ template "grafana.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "grafana.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "grafana.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'false'
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# path: /
|
||||
# port: http
|
||||
# readinessProbe:
|
||||
# httpGet:
|
||||
# path: /
|
||||
# port: http
|
||||
env:
|
||||
- name: GF_SECURITY_ADMIN_USER
|
||||
value: {{ .Values.user }}
|
||||
- name: GF_SECURITY_ADMIN_PASSWORD
|
||||
value: {{ .Values.password }}
|
||||
- name: GF_PATHS_PROVISIONING
|
||||
value: /etc/grafana/provisioning/
|
||||
volumeMounts:
|
||||
- name: grafana
|
||||
mountPath: /var/lib/grafana
|
||||
- name: dashboards
|
||||
mountPath: /etc/grafana/dashboards
|
||||
- name: datasources
|
||||
mountPath: /etc/grafana/provisioning/datasources
|
||||
- name: providers
|
||||
mountPath: /etc/grafana/provisioning/dashboards
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: grafana
|
||||
emptyDir: {}
|
||||
- name: dashboards
|
||||
configMap:
|
||||
name: {{ template "grafana.fullname" . }}-dashboards
|
||||
- name: providers
|
||||
configMap:
|
||||
name: {{ template "grafana.fullname" . }}-providers
|
||||
- name: datasources
|
||||
configMap:
|
||||
name: {{ template "grafana.fullname" . }}-datasources
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}-providers
|
||||
data:
|
||||
providers.yaml: |+
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: 'default'
|
||||
orgId: 1
|
||||
folder: ''
|
||||
type: file
|
||||
disableDeletion: false
|
||||
editable: true
|
||||
options:
|
||||
path: /etc/grafana/dashboards
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "grafana.name" . }}
|
||||
chart: {{ template "grafana.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ template "grafana.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -1,34 +0,0 @@
|
||||
# Default values for grafana.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: grafana/grafana
|
||||
tag: 5.0.1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
user: admin
|
||||
password:
|
||||
|
||||
url: http://prometheus:9090
|
||||
token:
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,5 +0,0 @@
|
||||
apiVersion: v1
|
||||
appVersion: "1.0"
|
||||
description: A Ngrok Helm chart for Kubernetes
|
||||
name: ngrok
|
||||
version: 0.1.0
|
||||
@@ -1,63 +0,0 @@
|
||||
# Ngrok
|
||||
|
||||
Expose Kubernetes service with [Ngrok](https://ngrok.com).
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
$ helm install sp/ngrok --name my-release \
|
||||
--set token=NGROK-TOKEN \
|
||||
--set expose.service=podinfo:9898
|
||||
```
|
||||
|
||||
The command deploys Ngrok on the Kubernetes cluster in the default namespace.
|
||||
The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```console
|
||||
$ helm delete --purge my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following tables lists the configurable parameters of the Grafana chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`image.repository` | Image repository | `stefanprodan/ngrok`
|
||||
`image.pullPolicy` | Image pull policy | `IfNotPresent`
|
||||
`image.tag` | Image tag | `latest`
|
||||
`replicaCount` | desired number of pods | `1`
|
||||
`tolerations` | List of node taints to tolerate | `[]`
|
||||
`affinity` | node/pod affinities | `node`
|
||||
`nodeSelector` | node labels for pod assignment | `{}`
|
||||
`service.type` | type of service | `ClusterIP`
|
||||
`token` | Ngrok auth token | `none`
|
||||
`expose.service` | Service address to be exposed as in `service-name:port` | `none`
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```console
|
||||
$ helm upgrade --install --wait tunel \
|
||||
--set token=NGROK-TOKEN \
|
||||
--set service.type=NodePort \
|
||||
--set expose.service=podinfo:9898 \
|
||||
sp/ngrok
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
```console
|
||||
$ helm install sp/grafana --name my-release -f values.yaml
|
||||
```
|
||||
|
||||
> **Tip**: You can use the default [values.yaml](values.yaml)
|
||||
```
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "ngrok.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "ngrok.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "ngrok.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "ngrok.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
@@ -1,32 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "ngrok.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "ngrok.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "ngrok.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -1,12 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "ngrok.fullname" . }}
|
||||
data:
|
||||
ngrok.yml: |-
|
||||
web_addr: 0.0.0.0:4040
|
||||
update: false
|
||||
log: stdout
|
||||
{{- if .Values.token }}
|
||||
authtoken: {{ .Values.token }}
|
||||
{{- end }}
|
||||
@@ -1,62 +0,0 @@
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "ngrok.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "ngrok.name" . }}
|
||||
chart: {{ template "ngrok.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "ngrok.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "ngrok.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'false'
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- ./ngrok
|
||||
- http
|
||||
- {{ .Values.expose.service }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /home/ngrok/.ngrok2
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 4040
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /api/tunnels
|
||||
port: http
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 30
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "ngrok.fullname" . }}
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "ngrok.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "ngrok.name" . }}
|
||||
chart: {{ template "ngrok.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ template "ngrok.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -1,25 +0,0 @@
|
||||
# Default values for ngrok.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: stefanprodan/ngrok
|
||||
tag: latest
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 4040
|
||||
|
||||
expose:
|
||||
service: ga-podinfo:9898
|
||||
|
||||
token: 4i3rDinhLqMHtvez71N9S_38rkS7onwv77VFNZTaUR6
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,12 +0,0 @@
|
||||
apiVersion: v1
|
||||
appVersion: "0.2.1"
|
||||
description: Podinfo Helm chart for Kubernetes
|
||||
name: podinfo
|
||||
version: 0.1.0
|
||||
home: https://github.com/stefanprodan/k8s-podinfo
|
||||
sources:
|
||||
- https://github.com/stefanprodan/k8s-podinfo
|
||||
maintainers:
|
||||
- name: stefanprodan
|
||||
email: stefanprodan@users.noreply.github.com
|
||||
engine: gotpl
|
||||
@@ -1,76 +0,0 @@
|
||||
# Podinfo
|
||||
|
||||
Podinfo is a tiny web application made with Go
|
||||
that showcases best practices of running microservices in Kubernetes.
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
$ helm install stable/podinfo --name my-release
|
||||
```
|
||||
|
||||
The command deploys podinfo on the Kubernetes cluster in the default namespace.
|
||||
The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```console
|
||||
$ helm delete --purge my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following tables lists the configurable parameters of the podinfo chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`affinity` | node/pod affinities | None
|
||||
`hpa.enabled` | Enables HPA | `false`
|
||||
`hpa.cpu` | Target CPU usage per pod | None
|
||||
`hpa.memory` | Target memory usage per pod | None
|
||||
`hpa.requests` | Target requests per second per pod | None
|
||||
`hpa.maxReplicas` | Maximum pod replicas | `10`
|
||||
`ingress.hosts` | Ingress accepted hostnames | None
|
||||
`ingress.tls` | Ingress TLS configuration | None:
|
||||
`image.pullPolicy` | Image pull policy | `IfNotPresent`
|
||||
`image.repository` | Image repository | `stefanprodan/podinfo`
|
||||
`image.tag` | Image tag | `0.0.1`
|
||||
`ingress.enabled` | Enables Ingress | `false`
|
||||
`ingress.annotations` | Ingress annotations | None
|
||||
`ingress.hosts` | Ingress accepted hostnames | None
|
||||
`ingress.tls` | Ingress TLS configuration | None
|
||||
`nodeSelector` | node labels for pod assignment | `{}`
|
||||
`podAnnotations` | annotations to add to each pod | `{}`
|
||||
`replicaCount` | desired number of pods | `1`
|
||||
`resources.requests/cpu` | pod CPU request | `1m`
|
||||
`resources.requests/memory` | pod memory request | `16Mi`
|
||||
`resources.limits/cpu` | pod CPU limit | None
|
||||
`resources.limits/memory` | pod memory limit | None
|
||||
`service.externalPort` | external port for the service | `9898`
|
||||
`service.internalPort` | internal port for the service | `9898`
|
||||
`service.nodePort` | node port for the service | `31198`
|
||||
`service.type` | type of service | `ClusterIP`
|
||||
`tolerations` | List of node taints to tolerate | `[]`
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```console
|
||||
$ helm install stable/podinfo --name my-release \
|
||||
--set=image.tag=0.0.2,service.type=NodePort
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
```console
|
||||
$ helm install stable/podinfo --name my-release -f values.yaml
|
||||
```
|
||||
|
||||
> **Tip**: You can use the default [values.yaml](values.yaml)
|
||||
```
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.hosts }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "podinfo.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "podinfo.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "podinfo.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "podinfo.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:{{ .Values.service.externalPort }}
|
||||
{{- end }}
|
||||
@@ -1,32 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "podinfo.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "podinfo.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "podinfo.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -1,68 +0,0 @@
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port={{ .Values.service.containerPort }}
|
||||
{{- if .Values.logLevel }}
|
||||
- -debug=true
|
||||
{{- end }}
|
||||
env:
|
||||
- name: backend_url
|
||||
value: {{ .Values.backend }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.service.containerPort }}
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: http
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
@@ -1,32 +0,0 @@
|
||||
{{- if .Values.hpa.enabled -}}
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
minReplicas: {{ .Values.replicaCount }}
|
||||
maxReplicas: {{ .Values.hpa.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.hpa.cpu }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
targetAverageUtilization: {{ .Values.hpa.cpu }}
|
||||
{{- end }}
|
||||
{{- if .Values.hpa.memory }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageValue: {{ .Values.hpa.memory }}
|
||||
{{- end }}
|
||||
{{- if .Values.hpa.requests }}
|
||||
- type: Pod
|
||||
pods:
|
||||
metricName: http_requests
|
||||
targetAverageValue: {{ .Values.hpa.requests }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,39 +0,0 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "podinfo.fullname" . -}}
|
||||
{{- $servicePort := .Values.service.port -}}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ . }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ $ingressPath }}
|
||||
backend:
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,22 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.externalPort }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
|
||||
nodePort: {{ .Values.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}-service-test-{{ randAlphaNum 5 | lower }}
|
||||
labels:
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
app: {{ template "podinfo.name" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test-success
|
||||
spec:
|
||||
containers:
|
||||
- name: curl
|
||||
image: radial/busyboxplus:curl
|
||||
command: ['curl']
|
||||
args: ['{{ template "podinfo.fullname" . }}:{{ .Values.service.externalPort }}']
|
||||
restartPolicy: Never
|
||||
@@ -1,41 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}-storage-test-{{ randAlphaNum 5 | lower }}
|
||||
labels:
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
app: {{ template "podinfo.name" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test-success
|
||||
spec:
|
||||
containers:
|
||||
- name: curl
|
||||
image: radial/busyboxplus:curl
|
||||
command: ["/bin/sh", "/scripts/ping.sh"]
|
||||
env:
|
||||
- name: PODINFO_SVC
|
||||
value: {{ template "podinfo.fullname" . }}:{{ .Values.service.externalPort }}
|
||||
volumeMounts:
|
||||
- name: scripts
|
||||
mountPath: /scripts
|
||||
restartPolicy: Never
|
||||
volumes:
|
||||
- name: scripts
|
||||
configMap:
|
||||
name: {{ template "podinfo.fullname" . }}-storage-cfg
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}-storage-cfg
|
||||
labels:
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
app: {{ template "podinfo.name" . }}
|
||||
data:
|
||||
ping.sh: |
|
||||
#!/bin/sh
|
||||
curl -sSd "$(curl -sSd 'test' ${PODINFO_SVC}/write)" ${PODINFO_SVC}/read|grep test
|
||||
@@ -1,53 +0,0 @@
|
||||
# Default values for podinfo.
|
||||
|
||||
replicaCount: 1
|
||||
backend: http://backend-podinfo:9898/echo
|
||||
|
||||
image:
|
||||
repository: stefanprodan/podinfo
|
||||
tag: 0.2.1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
externalPort: 9898
|
||||
containerPort: 9898
|
||||
nodePort: 31198
|
||||
|
||||
# Heapster or metrics-server add-on required
|
||||
hpa:
|
||||
enabled: false
|
||||
maxReplicas: 10
|
||||
# average total CPU usage per pod (1-100)
|
||||
cpu:
|
||||
# average memory usage per pod (100Mi-1Gi)
|
||||
memory:
|
||||
# average http requests per second per pod (k8s-prometheus-adapter)
|
||||
requests:
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /
|
||||
hosts:
|
||||
- podinfo.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources:
|
||||
limits:
|
||||
requests:
|
||||
cpu: 1m
|
||||
memory: 16Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
logLevel: debug
|
||||
@@ -1,4 +0,0 @@
|
||||
steps:
|
||||
- name: 'gcr.io/cloud-builders/docker'
|
||||
args: ['build','-f' , 'Dockerfile.ci', '-t', 'gcr.io/$PROJECT_ID/podinfo:$BRANCH_NAME-$SHORT_SHA', '.']
|
||||
images: ['gcr.io/$PROJECT_ID/podinfo:$BRANCH_NAME-$SHORT_SHA']
|
||||
@@ -1,37 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/stefanprodan/k8s-podinfo/pkg/server"
|
||||
"github.com/stefanprodan/k8s-podinfo/pkg/signals"
|
||||
"github.com/stefanprodan/k8s-podinfo/pkg/version"
|
||||
)
|
||||
|
||||
var (
|
||||
port string
|
||||
debug bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&port, "port", "8989", "Port to listen on.")
|
||||
flag.BoolVar(&debug, "debug", false, "sets log level to debug")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
if debug {
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
}
|
||||
|
||||
log.Info().Msgf("Starting podinfo version %s commit %s", version.VERSION, version.GITCOMMIT)
|
||||
log.Debug().Msgf("Starting HTTP server on port %v", port)
|
||||
|
||||
stopCh := signals.SetupSignalHandler()
|
||||
server.ListenAndServe(port, 5*time.Second, stopCh)
|
||||
}
|
||||
BIN
cuddle_bunny.gif
Normal file
BIN
cuddle_bunny.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 50 KiB |
BIN
cuddle_clap.gif
Normal file
BIN
cuddle_clap.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 42 KiB |
@@ -1,52 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: blue
|
||||
labels:
|
||||
app: blue
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: blue
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: blue
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: stefanprodan/podinfo:0.0.9
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -logtostderr=true
|
||||
- -v=2
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 2
|
||||
failureThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 3
|
||||
failureThreshold: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: blue
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: blue
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: blue.default:9898
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: blue
|
||||
@@ -1,52 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: green
|
||||
labels:
|
||||
app: green
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: green
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: green
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: stefanprodan/podinfo:0.0.9
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -logtostderr=true
|
||||
- -v=2
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 2
|
||||
failureThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 3
|
||||
failureThreshold: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
@@ -1,51 +0,0 @@
|
||||
# test cmd:
|
||||
# while true; do sleep 1; curl -sS -H "Host: podinfo.test" -H "x-subscription: trial" http://35.198.122.99:30080|grep HOSTNAME;done
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: green
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: green
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: green.default:9898
|
||||
weight: 50
|
||||
headers:
|
||||
X-Subscription: trial
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: green
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: blue-trial
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: blue-trial
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: blue.default:9898
|
||||
headers:
|
||||
X-Subscription: trial
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: blue
|
||||
@@ -1,70 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ambassador
|
||||
labels:
|
||||
component: ambassador
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
component: ambassador
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ambassador
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9102"
|
||||
spec:
|
||||
serviceAccountName: ambassador
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
component: ambassador
|
||||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- name: ambassador
|
||||
image: quay.io/datawire/ambassador:0.26.1
|
||||
ports:
|
||||
- containerPort: 80
|
||||
name: http
|
||||
- containerPort: 443
|
||||
name: https
|
||||
- containerPort: 8877
|
||||
name: admin
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 100Mi
|
||||
env:
|
||||
- name: AMBASSADOR_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /ambassador/v0/check_alive
|
||||
port: 8877
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /ambassador/v0/check_ready
|
||||
port: 8877
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 3
|
||||
- name: statsd-sink
|
||||
image: datawire/prom-statsd-exporter:0.6.0
|
||||
ports:
|
||||
- containerPort: 9102
|
||||
name: metrics
|
||||
restartPolicy: Always
|
||||
@@ -1,36 +0,0 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: ambassador
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- services
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["create", "update", "patch", "get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- secrets
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ambassador
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ambassador
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ambassador
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ambassador
|
||||
namespace: default
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
component: ambassador
|
||||
name: ambassador
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: 80
|
||||
nodePort: 30080
|
||||
- name: https
|
||||
port: 443
|
||||
targetPort: 443
|
||||
nodePort: 30443
|
||||
- name: admin
|
||||
port: 8877
|
||||
targetPort: 8877
|
||||
nodePort: 30877
|
||||
selector:
|
||||
component: ambassador
|
||||
@@ -1,83 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: podinfo
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: stefanprodan/podinfo:0.0.9
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -logtostderr=true
|
||||
- -v=2
|
||||
volumeMounts:
|
||||
- name: metadata
|
||||
mountPath: /etc/podinfod/metadata
|
||||
readOnly: true
|
||||
- name: resources
|
||||
mountPath: /etc/podinfod/resources
|
||||
readOnly: true
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 2
|
||||
failureThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 3
|
||||
failureThreshold: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
volumes:
|
||||
- name: metadata
|
||||
downwardAPI:
|
||||
items:
|
||||
- path: "labels"
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels
|
||||
- path: "annotations"
|
||||
fieldRef:
|
||||
fieldPath: metadata.annotations
|
||||
- name: resources
|
||||
downwardAPI:
|
||||
items:
|
||||
- path: "cpu_limit"
|
||||
resourceFieldRef:
|
||||
containerName: podinfod
|
||||
resource: limits.cpu
|
||||
- path: "cpu_request"
|
||||
resourceFieldRef:
|
||||
containerName: podinfod
|
||||
resource: requests.cpu
|
||||
- path: "mem_limit"
|
||||
resourceFieldRef:
|
||||
containerName: podinfod
|
||||
resource: limits.memory
|
||||
- path: "mem_request"
|
||||
resourceFieldRef:
|
||||
containerName: podinfod
|
||||
resource: requests.memory
|
||||
@@ -1,21 +0,0 @@
|
||||
---
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
minReplicas: 2
|
||||
maxReplicas: 10
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
targetAverageUtilization: 80
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageValue: 200Mi
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo-clusterip
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: podinfo
|
||||
@@ -1,16 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo-nodeport
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
nodePort: 31190
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: podinfo
|
||||
@@ -1,48 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: canarya-podinfo
|
||||
labels:
|
||||
app: canarya-podinfo
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: canarya-podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: canarya-podinfo
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:0.2.0
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -debug=true
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 5
|
||||
failureThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
@@ -1,27 +0,0 @@
|
||||
# test cmd:
|
||||
# while true; do sleep 1; curl -sS -H "Host: podinfo.test" -H "x-user: insider" http://35.198.122.99:30080/version;done
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: canarya-podinfo
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: canarya-podinfo
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: canarya-podinfo.default:9898
|
||||
headers:
|
||||
X-User: insider
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: canarya-podinfo
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: canaryb-podinfo
|
||||
labels:
|
||||
app: canaryb-podinfo
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: canaryb-podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: canaryb-podinfo
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:0.2.1
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -debug=true
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 5
|
||||
failureThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
@@ -1,28 +0,0 @@
|
||||
# test cmd:
|
||||
# while true; do sleep 1; curl -sS -H "Host: podinfo.test" -H "x-user: insider" http://35.198.122.99:30080/version;done
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: canaryb-podinfo
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: canaryb-podinfo
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: canaryb-podinfo.default:9898
|
||||
weight: 10
|
||||
headers:
|
||||
X-User: insider
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: canaryb-podinfo
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ga-podinfo
|
||||
labels:
|
||||
app: ga-podinfo
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: ga-podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ga-podinfo
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:0.1.0
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -logtostderr=true
|
||||
- -v=2
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 5
|
||||
failureThreshold: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ga-podinfo
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: ga-podinfo
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: ga-podinfo.default:9898
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: ga-podinfo
|
||||
@@ -1,65 +0,0 @@
|
||||
### Cloud9 IDE Kubernetes
|
||||
|
||||
Create the namespaces:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/k9/k9-ns.yaml
|
||||
```
|
||||
|
||||
Create a secret with the Git ssh key:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/k9/ssh-key.yaml
|
||||
```
|
||||
|
||||
Create the Git Server deploy and service:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/k9/git-dep.yaml
|
||||
kubectl apply -f ./deploy/k9/git-svc.yaml
|
||||
```
|
||||
|
||||
Create the Cloud9 IDE deployment:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/k9/
|
||||
```
|
||||
|
||||
Find the public IP:
|
||||
|
||||
```bash
|
||||
kubectl -n ide get svc --selector=name=ide
|
||||
```
|
||||
|
||||
Open Cloud9 IDE in your browser, login with `username/password` and run the following commands:
|
||||
|
||||
```bash
|
||||
ssh-keyscan gitsrv >> ~/.ssh/known_hosts
|
||||
git config --global user.email "user@weavedx.com"
|
||||
git config --global user.name "User"
|
||||
```
|
||||
|
||||
Exec into the Git server and create a repo:
|
||||
|
||||
```bash
|
||||
kubectl -n ide exec -it gitsrv-69b4cd5fc-dd6rf -- sh
|
||||
|
||||
/git-server # cd repos
|
||||
/git-server # mkdir myrepo.git
|
||||
/git-server # cd myrepo.git
|
||||
/git-server # git init --shared=true
|
||||
/git-server # git add .
|
||||
/git-server # git config --global user.email "user@weavedx.com"
|
||||
/git-server # git config --global user.name "User"
|
||||
/git-server # git commit -m "init"
|
||||
/git-server # git checkout -b dummy
|
||||
```
|
||||
|
||||
Go back to the Cloud9 IDE and clone the repo:
|
||||
|
||||
```bash
|
||||
git clone ssh://git@gitsrv/git-server/repos/myrepo.git
|
||||
git add .
|
||||
git commit -m "test"
|
||||
git push origin master
|
||||
```
|
||||
@@ -1,37 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
name: gitsrv
|
||||
name: gitsrv
|
||||
namespace: ide
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: gitsrv
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: gitsrv
|
||||
spec:
|
||||
containers:
|
||||
- image: jkarlos/git-server-docker
|
||||
name: git
|
||||
ports:
|
||||
- containerPort: 22
|
||||
name: ssh
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /git-server/repos
|
||||
name: git-server-data
|
||||
- mountPath: /git-server/keys
|
||||
name: ssh-git
|
||||
volumes:
|
||||
- name: ssh-git
|
||||
secret:
|
||||
secretName: ssh-git
|
||||
- name: git-server-data
|
||||
persistentVolumeClaim:
|
||||
claimName: git-server-data
|
||||
@@ -1,14 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: git-server-data
|
||||
namespace: ide
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -1,17 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
name: gitsrv
|
||||
name: gitsrv
|
||||
namespace: ide
|
||||
spec:
|
||||
ports:
|
||||
- name: ssh
|
||||
port: 22
|
||||
protocol: TCP
|
||||
targetPort: ssh
|
||||
selector:
|
||||
name: gitsrv
|
||||
type: ClusterIP
|
||||
@@ -1,92 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: ide
|
||||
namespace: ide
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
name: ide
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
spec:
|
||||
serviceAccount: ide
|
||||
serviceAccountName: ide
|
||||
initContainers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- test -d /workspace/k8s-podinfo || git clone https://github.com/stefanprodan/k8s-podinfo
|
||||
k8s-podinfo
|
||||
image: stefanprodan/k9c:0.1.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: git-clone
|
||||
volumeMounts:
|
||||
- mountPath: /workspace
|
||||
name: data
|
||||
containers:
|
||||
- name: ide
|
||||
args:
|
||||
- --auth
|
||||
- username:password
|
||||
image: stefanprodan/k9c:0.1.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
httpHeaders:
|
||||
- name: Authorization
|
||||
value: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
|
||||
path: /ide.html
|
||||
port: http
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
httpHeaders:
|
||||
- name: Authorization
|
||||
value: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
|
||||
path: /ide.html
|
||||
port: http
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 1
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
volumeMounts:
|
||||
- mountPath: /workspace
|
||||
name: data
|
||||
- mountPath: /var/run/docker.sock
|
||||
name: dockersocket
|
||||
- mountPath: /root/.ssh
|
||||
name: ssh-git
|
||||
volumes:
|
||||
- name: ssh-git
|
||||
secret:
|
||||
defaultMode: 0600
|
||||
secretName: ssh-git
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
# - name: ide-workspace-data
|
||||
# persistentVolumeClaim:
|
||||
# claimName: ide-workspace-data
|
||||
- hostPath:
|
||||
path: /var/run/docker.sock
|
||||
type: ""
|
||||
name: dockersocket
|
||||
@@ -1,10 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ide
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ide-workspace
|
||||
@@ -1,14 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: ide-workspace-data
|
||||
namespace: ide
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -1,78 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: ide
|
||||
namespace: ide
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: ide-cluster-view
|
||||
rules:
|
||||
- apiGroups:
|
||||
- '*'
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- nonResourceURLs:
|
||||
- '*'
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: ide-cluster-view
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ide-cluster-view
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ide
|
||||
namespace: ide
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: ide
|
||||
name: ide-workspace-workspace-admin
|
||||
namespace: ide-workspace
|
||||
rules:
|
||||
- apiGroups:
|
||||
- '*'
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
- '*'
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
name: ide
|
||||
name: ide-workspace-workspace-admin
|
||||
namespace: ide-workspace
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ide-workspace-workspace-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ide
|
||||
namespace: ide
|
||||
@@ -1,17 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
name: ide
|
||||
name: ide
|
||||
namespace: ide
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
selector:
|
||||
name: ide
|
||||
type: LoadBalancer
|
||||
@@ -1,11 +0,0 @@
|
||||
---
|
||||
# ssh-keyscan gitsrv >> ~/.ssh/known_hosts
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
data:
|
||||
id_rsa: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBczNuS2xrdkhLYlBEVlZCZTJLdGJSQ0dIcmphYU5MVkI3bVRHMjEwSjZpRXg4RnJYCldkR1YvZDJOUWJhYVFDQ1JDaHc3THdqSFFkeU5lVHJoam0xbGgzY0xzRk04RTJxRUEwZ0hMdVVyL2dlTkx6K0kKN2pzc0xTakJ0MTB6NWVYVzFjNUJaWWRBdElOK3dOTmpPTFptR21uV0NCQjBBUmx2M3d6RFNjOXE4NXZ2UXRjRwpxWjNnY0tReS9ZVDd3TXM0Y052ZW9CWDlBNVZOcnZrTjlrT3VBYmlCTFpUMWtCMWxxNVVsQ3FRVUpBNFQ4bVN5Clh5eXNsejhGcWxTb2xac1FKQmtSZmlDYlNQcENPcEVmc1h4L3ltTzlCaGVzMTVBOW42cDlVc253dDRjRzhwYjYKaEV6K0ZYQ0M5N3QrUXJ6L2dIV2w1bGI5SFViWFJPQ3FSaGxZWFFJREFRQUJBb0lCQURPNmNhcDU4dEJSTUlhQgpZS1RnVnJDL1hVVFlGZ0FGRWhpczBTZmVuQUh3U1YxMlFVVnhBK01rblJjSWsxTFNVQnR5NFpmLzFyWmo1QjdCCjJzcmJPdjBkVWJBamZWZzNSZzlRRGtuMDRZWVpGUmMrSDdCU083eFVWK2tLb3UrckhBMkpvZzJxT3ZzTVAxZEMKVmdHOWlKWXFWUGNFRHZ0V0lvbE9PMmNsc2pTK0REZEd4b0J2amFRbzhHVlZkaUZrekdOVmdTQkZnM1dPZnRsOApKMjZyNndzYVVXZDYrYyt1aTRFdUtJTjg2SEhNT0t0bDExbjd6QjNYVWllTEZVSW1GUjhHNHRHQlZwZXhzeVFpCmxjc2dUM1NnWEZDOUp6Q0VSUGRrTXVLSHVWMDBad01maU1ZK3N4Z3RsdXJRMnNhbVhXTmV4eGRlanpDR1RIcWcKV2xRWkJtRUNnWUVBNE5CN3VwMjZURFlUV0tyZ014aGJueHJaT0wxY3VhT1lWQUJnWWh5b2JnMWVabE10Mk1pVgpVaWxaamFkRnZ5emZjR1BTVEpjSjVmMmkvQ0dzS243RXFxUXdoZnd1S3ZRZTQyazgwbk5BcUczdTkvdkl4bklCCnFGZW5kTTE3SlN2WkU3NXFCVE9uTXVVZ1NuNFJoTXpzOEg3UTFmZFQ4UGMvTVRmRVVKcTQzcGtDZ1lFQXpGOUMKd1g0Z0UvUnZlbWZRc0ZVZ29TQ0lQdkR1bFhNNzNsK09XRnByOE00MWxpeGcvd0lvQ2NFZlpGcFdLUkpqSmwvUwpOVFh3YVhnOGg4RGl3a3d3dzJmcmNvWTl2TGNIcGxvWVRkN1ZjUVk4UGRKdjNJeGFReld6SHpMR3N0M29hZ08rCmJDbStsMEY5TnY0VUdWRHUrT0RSQjJyRWo2b1ZGRmh0SUQxbmRtVUNnWUJHS3V3alQrMkFzZlFSM2F1Q1p4eloKcVFDWmhBajM3QWEwV1RXOENhUE1UYUhrSUJ3VUtHN3FxUHRKaWlicngyNnAzbzRaMTU2QVNVemdrd1h3Y1lhaQptQUtKSHkrdHVtb1ZvcGdZTzE2Mzh5LzkrSGt1N3hCellZQmpwV3JGTEUxaHF6SGVFOFFnejREbm56ZUtrb2QxCmZLOWp5UUZMR1hDQXhSNGg1bGpES1FLQmdRQytqUjlmNjZvYkVQQ1Q3NUhicHpPS0tCd0FtNEhJWkszd2M2WHoKNlRMMVRqOFdhd0J4SStDUzM3YldTWWhHT1RlckF2S3EzRVR4QWNObVM4amhva3BoRjFhbTdGVkp6Rm5jbCtwTApTTFkzOExsZ1p3SVhYK0dWQXMrbENpSExpaTMyRXRHTVpndW5XYzlXNCtWM2lVZVhVMzV4N1BHaWhkR3JxNXJyCjBYVFRKUUtCZ0FReUF0RlloVHRONktCSER2NFdiTDQx
cnBtcUlVcUlpV0R6a3FPT1ZXcHgzYkpTdWVNeDEyUjQKWHVVaGkwL2ZqbGFvMmYwWTBqbTBDUlQ5ZmlhQW56WHNMRXNzN2JYQ0ZZcGt3V3ZrNnNqV1BCWGdPUnBZbklHNQpRRWNFeklzRDFKQm1EY0RxdWxpZ0dnUzNIdGhiWTl5WW4vU3l4d0owcU5ob3BDS1d2OWNOCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
|
||||
id_rsa.pub: c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDemVjcVdTOGNwczhOVlVGN1lxMXRFSVlldU5wbzB0VUh1Wk1iYlhRbnFJVEh3V3RkWjBaWDkzWTFCdHBwQUlKRUtIRHN2Q01kQjNJMTVPdUdPYldXSGR3dXdVendUYW9RRFNBY3U1U3YrQjQwdlA0anVPeXd0S01HM1hUUGw1ZGJWemtGbGgwQzBnMzdBMDJNNHRtWWFhZFlJRUhRQkdXL2ZETU5KejJyem0rOUMxd2FwbmVCd3BETDloUHZBeXpodzI5NmdGZjBEbFUydStRMzJRNjRCdUlFdGxQV1FIV1dybFNVS3BCUWtEaFB5WkxKZkxLeVhQd1dxVktpVm14QWtHUkYrSUp0SStrSTZrUit4ZkgvS1k3MEdGNnpYa0QyZnFuMVN5ZkMzaHdieWx2cUVUUDRWY0lMM3UzNUN2UCtBZGFYbVZ2MGRSdGRFNEtwR0dWaGQgdXNlckB3ZWF2ZWR4LmNvbQo=
|
||||
metadata:
|
||||
name: ssh-git
|
||||
namespace: ide
|
||||
type: Opaque
|
||||
@@ -1,25 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: podinfo-vol-0
|
||||
annotations:
|
||||
"volume.alpha.kubernetes.io/node-affinity": '{
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [
|
||||
{ "matchExpressions": [
|
||||
{ "key": "kubernetes.io/hostname",
|
||||
"operator": "In",
|
||||
"values": ["kube-node-0"]
|
||||
}
|
||||
]}
|
||||
]}}'
|
||||
spec:
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-ssd
|
||||
local:
|
||||
path: /mnt/data
|
||||
@@ -1,25 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: podinfo-vol-1
|
||||
annotations:
|
||||
"volume.alpha.kubernetes.io/node-affinity": '{
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [
|
||||
{ "matchExpressions": [
|
||||
{ "key": "kubernetes.io/hostname",
|
||||
"operator": "In",
|
||||
"values": ["kube-node-1"]
|
||||
}
|
||||
]}
|
||||
]}}'
|
||||
spec:
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-ssd
|
||||
local:
|
||||
path: /mnt/data
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
clusterIP: None
|
||||
publishNotReadyAddresses: false
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
selector:
|
||||
app: podinfo
|
||||
@@ -1,76 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
serviceName: "podinfo"
|
||||
replicas: 2
|
||||
podManagementPolicy: OrderedReady
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: podinfo
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: role
|
||||
operator: In
|
||||
values:
|
||||
- local-ssd
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- podinfo
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
containers:
|
||||
- name: probe
|
||||
image: stefanprodan/podinfo:0.0.9
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -logtostderr=true
|
||||
- -v=2
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9898
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 9898
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 9898
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 16Mi
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: local-ssd
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -1,7 +0,0 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local-ssd
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
118
docs/1-deploy.md
118
docs/1-deploy.md
@@ -1,118 +0,0 @@
|
||||
# Deploy and upgrade guide
|
||||
|
||||

|
||||
|
||||
### Setup Helm
|
||||
|
||||
Install Helm CLI:
|
||||
|
||||
```bash
|
||||
brew install kubernetes-helm
|
||||
```
|
||||
|
||||
Create a service account for Tiller:
|
||||
|
||||
```bash
|
||||
kubectl -n kube-system create sa tiller
|
||||
```
|
||||
|
||||
Create a cluster role binding for Tiller:
|
||||
|
||||
```bash
|
||||
kubectl create clusterrolebinding tiller-cluster-rule \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kube-system:tiller
|
||||
```
|
||||
|
||||
Deploy Tiller in kube-system namespace:
|
||||
|
||||
```bash
|
||||
helm init --skip-refresh --upgrade --service-account tiller
|
||||
```
|
||||
|
||||
### Using the Helm chart
|
||||
|
||||
Add k8s-podinfo repo:
|
||||
|
||||
```bash
|
||||
helm repo add sp https://stefanprodan.github.io/k8s-podinfo
|
||||
```
|
||||
|
||||
Create a namespace:
|
||||
|
||||
```bash
|
||||
kubectl create namespace test
|
||||
```
|
||||
|
||||
Create a release named frontend:
|
||||
|
||||
```bash
|
||||
helm upgrade --install --wait frontend \
|
||||
--set service.type=NodePort \
|
||||
--set service.nodePort=30098 \
|
||||
--namespace test \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Check if frontend is accessible from within the cluster:
|
||||
|
||||
```bash
|
||||
helm test --cleanup frontend
|
||||
```
|
||||
|
||||
Check if the frontend is available from outside the cluster:
|
||||
|
||||
```bash
|
||||
curl http://<KUBE_PUBLIC_IP>:30098/version
|
||||
```
|
||||
|
||||
Set CPU/memory requests and limits:
|
||||
|
||||
```bash
|
||||
helm upgrade --reuse-values frontend \
|
||||
--set resources.requests.cpu=10m \
|
||||
--set resources.limits.cpu=100m \
|
||||
--set resources.requests.memory=16Mi \
|
||||
--set resources.limits.memory=128Mi \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Setup horizontal pod autoscaling (HPA) based on CPU average usage and memory consumption:
|
||||
|
||||
```bash
|
||||
helm upgrade --reuse-values frontend \
|
||||
--set hpa.enabled=true \
|
||||
--set hpa.maxReplicas=10 \
|
||||
--set hpa.cpu=80 \
|
||||
--set hpa.memory=200Mi \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Increase the minimum replica count:
|
||||
|
||||
```bash
|
||||
helm upgrade --reuse-values frontend \
|
||||
--set replicaCount=2 \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Downgrade podinfo to version 0.2.0:
|
||||
|
||||
```bash
|
||||
helm upgrade --reuse-values frontend \
|
||||
--set image.tag=0.2.0 \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Rollback to the latest version:
|
||||
|
||||
```bash
|
||||
helm rollback frontend
|
||||
```
|
||||
|
||||
Delete the release:
|
||||
|
||||
```bash
|
||||
helm delete --purge frontend
|
||||
```
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
# Auto-scaling guide
|
||||
|
||||

|
||||
|
||||
### Deploy Frontend and Backend services
|
||||
|
||||
Create a release named frontend:
|
||||
|
||||
```bash
|
||||
helm upgrade --install --wait frontend \
|
||||
--set replicaCount=2 \
|
||||
--set service.type=NodePort \
|
||||
--set service.nodePort=30098 \
|
||||
--namespace test \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Setup horizontal pod auto-scaling (HPA) based on memory consumption:
|
||||
|
||||
```bash
|
||||
helm upgrade --reuse-values frontend \
|
||||
--set hpa.enabled=true \
|
||||
--set hpa.maxReplicas=5 \
|
||||
--set hpa.memory=200Mi \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Create a release named backend:
|
||||
|
||||
```bash
|
||||
helm upgrade --install --wait backend \
|
||||
--set replicaCount=1 \
|
||||
--set service.type=ClusterIP \
|
||||
--namespace test \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Setup HPA based on CPU usage:
|
||||
|
||||
```bash
|
||||
helm upgrade --reuse-values backend \
|
||||
--set hpa.enabled=true \
|
||||
--set hpa.maxReplicas=10 \
|
||||
--set hpa.cpu=10 \
|
||||
sp/podinfo
|
||||
```
|
||||
|
||||
Check if the backend ClusterIP service is accessible from within the cluster:
|
||||
|
||||
```bash
|
||||
helm test --cleanup backend
|
||||
```
|
||||
|
||||
Wait for HPA to start receiving metrics:
|
||||
|
||||
```bash
|
||||
kubectl -n test get hpa
|
||||
```
|
||||
|
||||
Run load test:
|
||||
|
||||
```bash
|
||||
#install hey
|
||||
go get -u github.com/rakyll/hey
|
||||
|
||||
#do 10K requests rate limited at 100 QPS
|
||||
hey -n 1000 -q 10 -c 10 -m POST -d "testing" http://<EXTERNAL-IP>:30098/backend
|
||||
```
|
||||
|
||||
Delete the releases:
|
||||
|
||||
```bash
|
||||
helm delete --purge frontend backend
|
||||
```
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
# Monitoring and Alerting
|
||||
|
||||
Prometheus query examples of key metrics to measure and alert upon.
|
||||
|
||||
### PromQL
|
||||
|
||||
**Request Rate** - the number of requests per second by instance
|
||||
|
||||
```
|
||||
sum(irate(http_requests_count{job=~".*podinfo"}[1m])) by (instance)
|
||||
```
|
||||
|
||||
**Request Errors** - the number of failed requests per second by URL path
|
||||
|
||||
```
|
||||
sum(irate(http_requests_count{job=~".*podinfo", status=~"5.."}[1m])) by (path)
|
||||
```
|
||||
|
||||
**Request Duration** - average duration of each request over 10 minutes
|
||||
|
||||
```
|
||||
sum(rate(http_requests_sum{job=~".*podinfo"}[10m])) /
|
||||
sum(rate(http_requests_count{job=~".*podinfo"}[10m]))
|
||||
```
|
||||
|
||||
**Request Latency** - 99th percentile request latency over 10 minutes
|
||||
|
||||
```
|
||||
histogram_quantile(0.99, sum(rate(http_requests_bucket{job=~".*podinfo"}[10m])) by (le))
|
||||
```
|
||||
|
||||
**Goroutines Rate** - the number of running goroutines over 10 minutes
|
||||
|
||||
```
|
||||
sum(irate(go_goroutines{job=~".*podinfo"}[10m]))
|
||||
```
|
||||
|
||||
**Memory Usage** - the average number of bytes in use by instance
|
||||
|
||||
```
|
||||
avg(go_memstats_alloc_bytes{job=~".*podinfo"}) by (instance)
|
||||
```
|
||||
|
||||
**GC Duration** - average duration of GC invocations over 10 minutes
|
||||
|
||||
```
|
||||
sum(rate(go_gc_duration_seconds_sum{job=~".*podinfo"}[10m])) /
|
||||
sum(rate(go_gc_duration_seconds_count{job=~".*podinfo"}[10m]))
|
||||
```
|
||||
|
||||
@@ -1,259 +0,0 @@
|
||||
# StatefulSets with local PV
|
||||
|
||||
Running a StatefulSet with local persistent volumes for bare-metal Kubernetes 1.9 clusters.
|
||||
|
||||

|
||||
|
||||
### Cluster provisioning
|
||||
|
||||
I'm assuming you have three hosts:
|
||||
|
||||
* kube-master-0
|
||||
* kube-node-0
|
||||
* kube-node-1
|
||||
|
||||
In order to use local PVs, the Kubernetes API server, controller-manager and scheduler must be
|
||||
configured with a series of `FEATURE_GATES`.
|
||||
|
||||
On `kube-master-0` machine save the following config as `master.yaml`:
|
||||
|
||||
```yaml
|
||||
apiVersion: kubeadm.k8s.io/v1alpha1
|
||||
kind: MasterConfiguration
|
||||
api:
|
||||
advertiseAddress: #privateip#
|
||||
networking:
|
||||
podSubnet: "10.32.0.0/12" # default Weave Net IP range
|
||||
apiServerExtraArgs:
|
||||
service-node-port-range: 80-32767
|
||||
feature-gates: "PersistentLocalVolumes=true,VolumeScheduling=true,MountPropagation=true"
|
||||
controllerManagerExtraArgs:
|
||||
feature-gates: "PersistentLocalVolumes=true,VolumeScheduling=true,MountPropagation=true"
|
||||
schedulerExtraArgs:
|
||||
feature-gates: "PersistentLocalVolumes=true,VolumeScheduling=true,MountPropagation=true"
|
||||
```
|
||||
|
||||
Replace `#privateip#` with the private IP of `kube-master-0` and initialize the Kubernetes master with:
|
||||
|
||||
```bash
|
||||
kubeadm init --config ./master.yaml
|
||||
```
|
||||
|
||||
Run the kubeadm join command on `kube-node-0` and `kube-node-1`.
|
||||
|
||||
Add the `role` label to the worker nodes:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig ./admin.conf label nodes kube-node-0 role=local-ssd
|
||||
kubectl --kubeconfig ./admin.conf label nodes kube-node-1 role=local-ssd
|
||||
```
|
||||
|
||||
### Persistent volumes provisioning
|
||||
|
||||
Create a Storage Class that will delay volume binding until pod scheduling:
|
||||
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local-ssd
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
```
|
||||
|
||||
Save the definition as `storage-class.yaml` and apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./storage-class.yaml
|
||||
```
|
||||
|
||||
On each worker node create the following dir:
|
||||
|
||||
```bash
|
||||
mkdir -p /mnt/data
|
||||
```
|
||||
|
||||
Create the Persistent Volume definition for `kube-node-0`:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: podinfo-vol-0
|
||||
annotations:
|
||||
"volume.alpha.kubernetes.io/node-affinity": '{
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [
|
||||
{ "matchExpressions": [
|
||||
{ "key": "kubernetes.io/hostname",
|
||||
"operator": "In",
|
||||
"values": ["kube-node-0"]
|
||||
}
|
||||
]}
|
||||
]}}'
|
||||
spec:
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-ssd
|
||||
local:
|
||||
path: /mnt/data
|
||||
```
|
||||
|
||||
Do the same for the second node by changing the PV name to `podinfo-vol-1` and the
|
||||
node selector expression to `kube-node-1`.
|
||||
|
||||
Save the PVs files as `pv-0.yaml` and `pv-1.yaml` and apply them with:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./pv-0.yaml,pv-1.yaml
|
||||
```
|
||||
|
||||
### StatefulSet config
|
||||
|
||||
Create a StatefulSet definition with two replicas:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1beta2
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
serviceName: "data"
|
||||
replicas: 2
|
||||
podManagementPolicy: OrderedReady
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: podinfo
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: role
|
||||
operator: In
|
||||
values:
|
||||
- local-ssd
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- podinfo
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: stefanprodan/podinfo:0.0.7
|
||||
command:
|
||||
- ./podinfo
|
||||
- -port=9898
|
||||
- -logtostderr=true
|
||||
- -v=2
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9898
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: local-ssd
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
```
|
||||
|
||||
The node affinity spec instructs the scheduler to create the StatefulSet pods only on the
|
||||
nodes that are labeled with `role=local-ssd`. The pod anti-affinity spec prevents the scheduler
|
||||
from creating more than one pod per node.
|
||||
|
||||
The volume claim template will create a PVC on each node targeting volumes with the `local-ssd`
|
||||
storage class.
|
||||
|
||||
Save the above definition as `statefulset.yaml` and apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./statefulset.yaml
|
||||
```
|
||||
|
||||
Once the podinfo StatefulSet has been deployed you can check if the volumes have been claimed with:
|
||||
|
||||
```bash
|
||||
kubectl get pvc
|
||||
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
|
||||
data-prodinfo-0 Bound podinfo-vol-0 1Gi RWO local-ssd 7h
|
||||
data-prodinfo-1 Bound podinfo-vol-1 1Gi RWO local-ssd 7h
|
||||
```
|
||||
|
||||
Create a headless service to expose the podinfo StatefulSet:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
clusterIP: None
|
||||
publishNotReadyAddresses: false
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
selector:
|
||||
app: podinfo
|
||||
```
|
||||
|
||||
Save the above definition as `service.yaml` and apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./service.yaml
|
||||
```
|
||||
|
||||
Each podinfo replica has its own DNS address as in `<pod-name>.<service-name>.<namespace>`.
|
||||
|
||||
Create a temporary curl pod in the default namespace in order to access the StatefulSet:
|
||||
|
||||
```bash
|
||||
kubectl run -i --rm --tty curl --image=radial/busyboxplus:curl --restart=Never -- sh
|
||||
```
|
||||
|
||||
Inside the curl container issue a write command for `podinfo-0`:
|
||||
|
||||
```bash
|
||||
[ root@curl:/ ]$ curl -d 'test' podinfo-0.data:9898/write
|
||||
74657374da39a3ee5e6b4b0d3255bfef95601890afd80709
|
||||
```
|
||||
|
||||
Now read the file using the SHA1 hash:
|
||||
|
||||
```bash
|
||||
[ root@curl:/ ]$ curl -d '74657374da39a3ee5e6b4b0d3255bfef95601890afd80709' podinfo-0.data:9898/read
|
||||
test
|
||||
```
|
||||
|
||||
You can remove the StatefulSet, PV and service with:
|
||||
|
||||
```bash
|
||||
kubectl delete -f ./service.yaml,statefulset.yaml,pv-0.yaml,pv-1.yaml
|
||||
```
|
||||
|
||||
The Persistent Volume Claims can be removed with:
|
||||
|
||||
```bash
|
||||
kubectl delete pvc data-prodinfo-0 data-prodinfo-1
|
||||
```
|
||||
|
||||
256
docs/5-canary.md
256
docs/5-canary.md
@@ -1,256 +0,0 @@
|
||||
# Canary Deployments and A/B Testing
|
||||
|
||||
Canary Deployment and A/B testing with Ambassador's Envoy API Gateway.
|
||||
|
||||

|
||||
|
||||
### Deploy Ambassador
|
||||
|
||||
Deploy Ambassador and expose it with a LoadBalancer service or NodePort if you are running on-prem:
|
||||
|
||||
```bash
|
||||
helm repo add sp https://stefanprodan.github.io/k8s-podinfo
|
||||
|
||||
helm upgrade --install --wait envoy \
|
||||
--set service.type=LoadBalancer \
|
||||
--set replicaCount=2 \
|
||||
sp/ambassador
|
||||
```
|
||||
|
||||
Find the LoadBalancer IP and store it in an environment variable for later use:
|
||||
|
||||
```bash
|
||||
export ENVOY=$(kubectl get svc --namespace default envoy-ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
```
|
||||
|
||||
Test that Envoy is listening on port 80:
|
||||
|
||||
```bash
|
||||
curl -sI http://$ENVOY | grep envoy
|
||||
server: envoy
|
||||
```
|
||||
|
||||
### Expose services via the API gateway
|
||||
|
||||
Deploy podinfo version 0.1.0 as the general available release:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/canary/ga-dep.yaml
|
||||
```
|
||||
|
||||
Expose the GA release via Ambassador on the `podinfo.test` domain:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ga-podinfo
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: ga-podinfo
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: ga-podinfo.default:9898
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: ga-podinfo
|
||||
```
|
||||
|
||||
Apply the GA service:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/canary/ga-svc.yaml
|
||||
```
|
||||
|
||||
Test that v0.1.0 is available at `podinfo.test` with curl:
|
||||
|
||||
```bash
|
||||
curl -H 'Host: podinfo.test' -sS http://$ENVOY/version | grep version
|
||||
version: 0.1.0
|
||||
```
|
||||
|
||||
### Canary deployment
|
||||
|
||||
Let's assume you have an insiders program for your users and some of them enrolled.
|
||||
Once enrolled, your users' requests will have an HTTP header like `X-User: insider` attached to every request.
|
||||
|
||||
Simulate an insider user call with:
|
||||
|
||||
```bash
|
||||
curl -H 'X-User: insider' -H 'Host: podinfo.test' -sS http://$ENVOY/version | grep version
|
||||
version: 0.1.0
|
||||
```
|
||||
|
||||
Deploy podinfo version 0.2.0 as a release candidate:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/canary/canarya-dep.yaml
|
||||
```
|
||||
|
||||
Create a service named `canarya-podinfo` and instruct Ambassador to shift the insiders to the RC deployment:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: canarya-podinfo
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: canarya-podinfo
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: canarya-podinfo.default:9898
|
||||
headers:
|
||||
X-User: insider
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: canarya-podinfo
|
||||
```
|
||||
|
||||
Apply the service with:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/canary/canarya-svc.yaml
|
||||
```
|
||||
|
||||
Now if a regular user calls podinfo nothing changes,
|
||||
but if an insider calls podinfo they will be routed to version 0.2.0:
|
||||
|
||||
```bash
|
||||
curl -H 'Host: podinfo.test' -sS http://$ENVOY/version|grep version
|
||||
version: 0.1.0
|
||||
|
||||
curl -H 'X-User: insider' -H 'Host: podinfo.test' -sS http://$ENVOY/version|grep version
|
||||
version: 0.2.0
|
||||
```
|
||||
|
||||
### A/B testing
|
||||
|
||||
Let's assume you have a new release candidate version that you want to test on a small subset of your
|
||||
insiders.
|
||||
|
||||
Deploy podinfo version 0.2.1 as a release candidate:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/canary/canaryb-dep.yaml
|
||||
```
|
||||
|
||||
Create a service named `canaryb-podinfo` and instruct Ambassador to shift ten percent of
|
||||
the insiders traffic to v0.2.1:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: canaryb-podinfo
|
||||
annotations:
|
||||
getambassador.io/config: |
|
||||
---
|
||||
apiVersion: ambassador/v0
|
||||
kind: Mapping
|
||||
name: canaryb-podinfo
|
||||
prefix: /
|
||||
host: podinfo.test
|
||||
service: canaryb-podinfo.default:9898
|
||||
weight: 10
|
||||
headers:
|
||||
X-User: insider
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9898
|
||||
targetPort: 9898
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: canaryb-podinfo
|
||||
```
|
||||
|
||||
Apply the service with:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./deploy/canary/canaryb-svc.yaml
|
||||
```
|
||||
|
||||
Now let's call the service in a while loop, one in ten calls will be routed to v0.2.1:
|
||||
|
||||
```bash
|
||||
while true; do sleep 1; curl -H 'X-User: insider' -H 'Host: podinfo.test' -sS http://$ENVOY/version|grep version; done
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.0
|
||||
version: 0.2.1
|
||||
```
|
||||
|
||||
### CI/CD Automation
|
||||
|
||||
Once you have in place the GA and Canary releases you would probably want to automate
|
||||
the deployment of patches for 0.1.x and 0.2.x.
|
||||
|
||||
Continuous delivery can be accomplished with a GitOps pipeline that involves several tools
|
||||
like TravisCI and Weave Flux.
|
||||
|
||||

|
||||
|
||||
GA GitOps pipeline steps:
|
||||
|
||||
* An engineer cuts a new release by tagging the master branch as `0.1.1`
|
||||
* GitHub notifies TravisCI that a new tag has been committed
|
||||
* TravisCI builds the Docker image, tags it as `0.1.1` and pushes it to Docker Hub
|
||||
* Weave Flux detects the new tag on Docker Hub and updates the GA deployment definition
|
||||
* Weave Flux commits the GA deployment definition to GitHub
|
||||
* Weave Flux triggers a rolling update of the GA deployment
|
||||
|
||||
The canary continuous delivery follows the same pattern, the only difference is that Weave Flux must be
|
||||
configured with a different filter:
|
||||
|
||||
* `0.1.*` for GA
|
||||
* `0.2.*` for Canary
|
||||
|
||||
### Monitoring
|
||||
|
||||
Install Grafana chart with Weave Cloud data source:
|
||||
|
||||
```bash
|
||||
helm install --name weave \
|
||||
--set service.type=LoadBalancer \
|
||||
--set token=WEAVE-CLOUD-TOKEN \
|
||||
sp/grafana
|
||||
```
|
||||
|
||||
Start a load test for both GA and Canary deployments:
|
||||
|
||||
```bash
|
||||
# GA
|
||||
hey -n 10000 -host podinfo.test http://$ENVOY
|
||||
|
||||
# Canary
|
||||
hey -n 10000 -host podinfo.test -H 'X-User: insider' http://$ENVOY
|
||||
```
|
||||
|
||||
Open Grafana's URL and navigate to Ambassador dashboard:
|
||||
|
||||

|
||||
|
||||
TODO: GA vs Canary RED metrics dashboard
|
||||
125
docs/6-ngrok.md
125
docs/6-ngrok.md
@@ -1,125 +0,0 @@
|
||||
# Expose Kubernetes services over HTTPS with Ngrok
|
||||
|
||||
Have you ever wanted to expose a Kubernetes service running on Minikube on the internet and have a
|
||||
temporary HTTPS address for it? If so then Ngrok is the perfect solution to do that without any
|
||||
firewall, NAT or DNS configurations.
|
||||
If you are developing an application that works with webhooks or oauth callbacks
|
||||
Ngrok can create a tunnel between your Kubernetes service and their cloud platform and provide you with
|
||||
a unique HTTPS URL that you can use to test and debug your service.
|
||||
|
||||
For this purpose I've made a Helm chart that you can use to deploy Ngrok on Kubernetes by specifying
|
||||
a ClusterIP service that will get exposed on the internet.
|
||||
|
||||
What follows is a step-by-step guide on how you can use Ngrok as a reverse proxy to
|
||||
receive GitHub notifications via webhooks in an application hosted on your local Minikube.
|
||||
|
||||
### Deploy a webhook receiver
|
||||
|
||||
In order to receive notifications from GitHub you need a web application that exposes an
|
||||
HTTP POST endpoint and accepts a JSON payload. [Podinfo](https://github.com/stefanprodan/k8s-podinfo)
|
||||
is a tiny web app made with Go that can receive any kind of payload on the `/echo` route.
|
||||
Let's deploy podinfo using Helm.
|
||||
|
||||
If you don't have Helm running on your Kubernetes cluster here is how you can set it up.
|
||||
|
||||
First install Helm CLI:
|
||||
|
||||
```bash
|
||||
brew install kubernetes-helm
|
||||
```
|
||||
|
||||
Create a service account for Tiller:
|
||||
|
||||
```bash
|
||||
kubectl -n kube-system create sa tiller
|
||||
```
|
||||
|
||||
Create a cluster role binding for Tiller:
|
||||
|
||||
```bash
|
||||
kubectl create clusterrolebinding tiller-cluster-rule \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kube-system:tiller
|
||||
```
|
||||
|
||||
Deploy Tiller in kube-system namespace:
|
||||
|
||||
```bash
|
||||
helm init --skip-refresh --upgrade --service-account tiller
|
||||
```
|
||||
|
||||
The `podinfo` and `ngrok` charts are hosted on GitHub. Add the k8s-podinfo repo:
|
||||
|
||||
```bash
|
||||
helm repo add sp https://stefanprodan.github.io/k8s-podinfo
|
||||
```
|
||||
|
||||
Install `podinfo`:
|
||||
|
||||
```bash
|
||||
helm install sp/podinfo --name webhook
|
||||
```
|
||||
|
||||
This deploys `podinfo` in the default namespace and
|
||||
creates a ClusterIP service with the address `webhook-podinfo:9898`.
|
||||
|
||||
### Deploy Ngrok
|
||||
|
||||
Before you begin go to [ngrok.com](https://ngrok.com) and register for a free account.
|
||||
|
||||
Ngrok will create a token for you, use it when installing the Ngrok chart.
|
||||
|
||||
Install Ngrok:
|
||||
|
||||
```bash
|
||||
helm install sp/ngrok --name tunnel \
|
||||
--set token=NGROK-TOKEN \
|
||||
--set service.type=NodePort \
|
||||
--set expose.service=webhook-podinfo:9898
|
||||
```
|
||||
|
||||
The above command deploys Ngrok in the default namespace and exposes the Ngrok web UI
|
||||
on a random node port. Find the port with:
|
||||
|
||||
```bash
|
||||
kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services tunnel-ngrok
|
||||
```
|
||||
|
||||
Now open a browser and navigate to `http://<KUBE-IP>:<NGROK-PORT>/status`. On the status page you
|
||||
should see the public HTTPS address generated by Ngrok.
|
||||
|
||||
Use curl to test if you can reach podinfo `/echo` route:
|
||||
|
||||
```bash
|
||||
curl -d '{"message": "testing ngrok"}' https://122261e6.ngrok.io/echo
|
||||
```
|
||||
|
||||
On the status page you should see that the total number of connections has increased.
|
||||
|
||||

|
||||
|
||||
### Setup GitHub webhook
|
||||
|
||||
Go to GitHub and create a new repository or use one that you already have.
|
||||
On your repo page go to _Settings -> Webhooks -> Add Webhook_ and enter the Ngrok HTTPS URL adding
|
||||
`/echo` at the end:
|
||||
|
||||

|
||||
|
||||
For Content-type select _application/json_, check _Send me everything_ and click on _Add webhook_.
|
||||
|
||||
Once you hit the add button GitHub will make a call to the `/echo` URL. Using Ngrok web UI you can
|
||||
inspect the GitHub payload. Navigate to `http://<KUBE-IP>:<NGROK-PORT>/inspect/http` and you
|
||||
should see the request body:
|
||||
|
||||

|
||||
|
||||
Ngrok not only makes it very easy to expose Kubernetes services on the internet
|
||||
but also gives you a powerful tool for inspecting the traffic to your applications.
|
||||
|
||||
You can tear all down by deleting the Helm releases with:
|
||||
|
||||
```bash
|
||||
helm delete --purge tunnel webhook
|
||||
```
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user