Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-15 02:20:22 +00:00

Compare commits (23 commits):

f6fa5e3891
a305a0b705
dfe619e2ea
2b3d425b70
6e55fea413
b6a08b6615
eaa6906516
62a7a92f2a
3aeb0945c5
e8c85efeae
6651f6452b
0ca48d77be
a9e0e018e3
122d11f445
b03555858c
dcc5a40441
8c949f59de
e8d91a0375
fae9aa664d
c31e9e5a96
99fff98274
11d84bf35d
e56ba480c7
.circleci/config.yml

@@ -78,6 +78,17 @@ jobs:
       - run: test/e2e-istio.sh
       - run: test/e2e-tests.sh
 
+  e2e-kubernetes-testing:
+    machine: true
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/bin
+      - run: test/container-build.sh
+      - run: test/e2e-kind.sh
+      - run: test/e2e-kubernetes.sh
+      - run: test/e2e-kubernetes-tests.sh
+
   e2e-smi-istio-testing:
     machine: true
     steps:

@@ -133,6 +144,47 @@ jobs:
       - run: test/e2e-linkerd.sh
       - run: test/e2e-linkerd-tests.sh
 
+  push-helm-charts:
+    docker:
+      - image: circleci/golang:1.12
+    steps:
+      - checkout
+      - run:
+          name: Install kubectl
+          command: sudo curl -L https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && sudo chmod +x /usr/local/bin/kubectl
+      - run:
+          name: Install helm
+          command: sudo curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.14.2-linux-amd64.tar.gz | tar xz && sudo mv linux-amd64/helm /bin/helm && sudo rm -rf linux-amd64
+      - run:
+          name: Initialize helm
+          command: helm init --client-only --kubeconfig=$HOME/.kube/kubeconfig
+      - run:
+          name: Lint charts
+          command: |
+            helm lint ./charts/*
+      - run:
+          name: Package charts
+          command: |
+            mkdir $HOME/charts
+            helm package ./charts/* --destination $HOME/charts
+      - run:
+          name: Publish charts
+          command: |
+            if echo "${CIRCLE_TAG}" | grep -Eq "[0-9]+(\.[0-9]+)*(-[a-z]+)?$"; then
+              REPOSITORY="https://weaveworksbot:${GITHUB_TOKEN}@github.com/weaveworks/flagger.git"
+              git config user.email weaveworksbot@users.noreply.github.com
+              git config user.name weaveworksbot
+              git remote set-url origin ${REPOSITORY}
+              git checkout gh-pages
+              mv -f $HOME/charts/*.tgz .
+              helm repo index . --url https://flagger.app
+              git add .
+              git commit -m "Publish Helm charts v${CIRCLE_TAG}"
+              git push origin gh-pages
+            else
+              echo "Not a release! Skip charts publish"
+            fi
+
 workflows:
   version: 2
   build-test-push:

@@ -145,7 +197,7 @@ workflows:
       - e2e-istio-testing:
          requires:
            - build-binary
-      - e2e-smi-istio-testing:
+      - e2e-kubernetes-testing:
          requires:
            - build-binary
       # - e2e-supergloo-testing:

@@ -164,7 +216,7 @@ workflows:
          requires:
            - build-binary
            - e2e-istio-testing
-           - e2e-smi-istio-testing
+           - e2e-kubernetes-testing
            #- e2e-supergloo-testing
            - e2e-gloo-testing
            - e2e-nginx-testing

@@ -187,6 +239,14 @@ workflows:
           tags:
             ignore: /^chart.*/
       - push-binary:
          requires:
            - push-container
          filters:
+           branches:
+             ignore: /.*/
+           tags:
+             ignore: /^chart.*/
+      - push-helm-charts:
+         requires:
+           - push-container
+         filters:
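
The chart publish step above only fires for release tags. A minimal local sketch of the same guard, using a hypothetical TAG variable in place of CIRCLE_TAG:

```bash
#!/usr/bin/env bash
# Hypothetical stand-in for CIRCLE_TAG, to exercise the release guard above.
TAG="0.18.2"

# The regex accepts tags ending in a semver-like version, e.g. 0.18.2 or 1.0.0-rc.
if echo "${TAG}" | grep -Eq "[0-9]+(\.[0-9]+)*(-[a-z]+)?$"; then
  echo "release tag: charts would be packaged and pushed to gh-pages"
else
  echo "Not a release! Skip charts publish"
fi
```
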

CHANGELOG.md (24 changed lines)

@@ -2,6 +2,26 @@
 
 All notable changes to this project are documented in this file.
 
+## 0.18.2 (2019-08-05)
+
+Fixes multi-port support for Istio
+
+#### Fixes
+
+- Fix port discovery for multiple port services [#267](https://github.com/weaveworks/flagger/pull/267)
+
+#### Improvements
+
+- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/weaveworks/flagger/pull/268)
+
+## 0.18.1 (2019-07-30)
+
+Fixes Blue/Green style deployments for Kubernetes and Linkerd providers
+
+#### Fixes
+
+- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/weaveworks/flagger/pull/261)
+
 ## 0.18.0 (2019-07-29)
 
 Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-gating) and pausing/resuming an ongoing analysis

@@ -18,8 +38,8 @@ Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-ga
 
 #### Breaking changes
 
-- due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries status phase will be reset to `Initialized`
-- upgrading Flagger with Helm will fail due to Helm poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
+- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries status phase will be reset to `Initialized`
+- Upgrading Flagger with Helm will fail due to Helm poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
 
 ## 0.17.0 (2019-07-08)

Makefile (5 changed lines)

@@ -63,8 +63,9 @@ test: test-fmt test-codegen
 
 helm-package:
 	cd charts/ && helm package ./*
-	mv charts/*.tgz docs/
-	helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
+	mv charts/*.tgz bin/
+	curl -s https://raw.githubusercontent.com/weaveworks/flagger/gh-pages/index.yaml > ./bin/index.yaml
+	helm repo index bin --url https://flagger.app --merge ./bin/index.yaml
 
 helm-up:
 	helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
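
The reworked `helm-package` target now merges against the gh-pages index and publishes under https://flagger.app. A sketch of how the result could be consumed, assuming Helm 2 as used in CI:

```bash
# Rebuild the packaged charts and the merged index under bin/.
make helm-package

# The index published from gh-pages is served at https://flagger.app,
# so the repo can be added and searched like any chart repository.
helm repo add flagger https://flagger.app
helm search flagger
```
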

@@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
         - name: flagger
-          image: weaveworks/flagger:0.18.0
+          image: weaveworks/flagger:0.18.2
           imagePullPolicy: IfNotPresent
           ports:
             - name: http

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: flagger
-version: 0.18.0
-appVersion: 0.18.0
+version: 0.18.2
+appVersion: 0.18.2
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.

@@ -32,6 +32,10 @@ spec:
                 app.kubernetes.io/name: {{ template "flagger.name" . }}
                 app.kubernetes.io/instance: {{ .Release.Name }}
             topologyKey: kubernetes.io/hostname
+      {{- if .Values.image.pullSecret }}
+      imagePullSecrets:
+        - name: {{ .Values.image.pullSecret }}
+      {{- end }}
       containers:
         - name: flagger
           securityContext:

@@ -2,8 +2,9 @@
 
 image:
   repository: weaveworks/flagger
-  tag: 0.18.0
+  tag: 0.18.2
   pullPolicy: IfNotPresent
+  pullSecret:
 
 metricsServer: "http://prometheus:9090"
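
The new `image.pullSecret` value feeds the `imagePullSecrets` block added to the deployment template above. A hypothetical install using it, assuming a pre-created registry secret named `regcred`:

```bash
# regcred is a hypothetical docker-registry secret; replace the credentials.
kubectl -n istio-system create secret docker-registry regcred \
  --docker-server=https://index.docker.io/v1/ \
  --docker-username=example \
  --docker-password=example

helm upgrade -i flagger ./charts/flagger \
  --namespace=istio-system \
  --set image.pullSecret=regcred
```
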

@@ -232,8 +232,7 @@ spec:
       mode: ISTIO_MUTUAL
 ```
 
-Both port `8080` and `9090` will be added to the ClusterIP services but the virtual service
-will point to the port specified in `spec.service.port`.
+Both port `8080` and `9090` will be added to the ClusterIP services.
 
 ### Label selectors
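
A sketch of the multi-port case this doc change covers: with `portDiscovery: true`, extra container ports such as `9090` end up on the generated ClusterIP services alongside `spec.service.port`. The spec shape follows the Canary examples elsewhere in this diff; names and values are illustrative:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    # the port traffic is routed to
    port: 8080
    # additional container ports (e.g. 9090) are discovered from the pod spec
    portDiscovery: true
  canaryAnalysis:
    interval: 1m
    threshold: 10
    maxWeight: 50
    stepWeight: 10
EOF
```
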

@@ -37,8 +37,10 @@ Deploy the load testing service to generate traffic during the canary analysis:
 ```bash
 helm upgrade -i flagger-loadtester flagger/loadtester \
 --namespace=test \
---set meshName=global.appmesh-system \
---set "backends[0]=podinfo.test"
+--set meshName=global \
+--set "backends[0]=podinfo.test" \
+--set "backends[1]=podinfo-canary.test" \
+--set "backends[2]=podinfo-primary.test"
 ```
 
 Create a canary custom resource:

@@ -67,7 +69,7 @@ spec:
     # container port
     port: 9898
     # App Mesh reference
-    meshName: global.appmesh-system
+    meshName: global
     # App Mesh egress (optional)
     backends:
       - backend.test

@@ -8,4 +8,4 @@ resources:
   - deployment.yaml
 images:
   - name: weaveworks/flagger
-    newTag: 0.18.0
+    newTag: 0.18.2

@@ -8,6 +8,7 @@ spec:
       containers:
         - name: flagger
           args:
             - -log-level=info
+            - -mesh-provider=istio
             - -metrics-server=http://prometheus:9090
             - -slack-user=flagger

@@ -8,6 +8,7 @@ spec:
       containers:
         - name: flagger
           args:
             - -log-level=info
+            - -mesh-provider=kubernetes
             - -metrics-server=http://flagger-prometheus:9090
             - -slack-user=flagger

@@ -8,6 +8,7 @@ spec:
      containers:
        - name: flagger
          args:
            - -log-level=info
+           - -mesh-provider=linkerd
            - -metrics-server=http://linkerd-prometheus:9090
            - -slack-user=flagger

@@ -614,8 +614,8 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
     if r.Spec.Provider != "" {
         metricsProvider = r.Spec.Provider
 
-        // set the metrics provider to Linkerd Prometheus when using NGINX as Linkerd Ingress
-        if r.Spec.Provider == "nginx" && strings.Contains(c.meshProvider, "linkerd") {
+        // set the metrics server to Linkerd Prometheus when Linkerd is the default mesh provider
+        if strings.Contains(c.meshProvider, "linkerd") {
             metricsProvider = "linkerd"
         }
     }

@@ -28,6 +28,10 @@ func (factory Factory) Observer(provider string) Interface {
         return &HttpObserver{
             client: factory.Client,
         }
+    case provider == "kubernetes":
+        return &HttpObserver{
+            client: factory.Client,
+        }
     case provider == "appmesh":
         return &EnvoyObserver{
             client: factory.Client,

@@ -2,6 +2,7 @@ package router
 
 import (
     "fmt"
+
     "github.com/google/go-cmp/cmp"
     "github.com/google/go-cmp/cmp/cmpopts"
     flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
@@ -9,7 +10,6 @@ import (
     clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
     "go.uber.org/zap"
     "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/apis/meta/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/kubernetes"

@@ -236,7 +236,7 @@ func (ir *IstioRouter) GetRoutes(canary *flaggerv1.Canary) (
 ) {
     targetName := canary.Spec.TargetRef.Name
     vs := &istiov1alpha3.VirtualService{}
-    vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
+    vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
             err = fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)

@@ -283,7 +283,7 @@ func (ir *IstioRouter) SetRoutes(
     primaryName := fmt.Sprintf("%s-primary", targetName)
     canaryName := fmt.Sprintf("%s-canary", targetName)
 
-    vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
+    vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
             return fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)

@@ -383,12 +383,5 @@ func makeDestination(canary *flaggerv1.Canary, host string, weight int) istiov1a
         Weight: weight,
     }
 
-    // if port discovery is enabled then we need to explicitly set the destination port
-    if canary.Spec.Service.PortDiscovery {
-        dest.Destination.Port = &istiov1alpha3.PortSelector{
-            Number: uint32(canary.Spec.Service.Port),
-        }
-    }
-
     return dest
 }

@@ -1,4 +1,4 @@
 package version
 
-var VERSION = "0.18.0"
+var VERSION = "0.18.2"
 var REVISION = "unknown"

@@ -2,7 +2,7 @@
 
 set -o errexit
 
-GLOO_VER="0.17.6"
+GLOO_VER="0.18.8"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

@@ -12,7 +12,7 @@ helm upgrade -i gloo gloo/gloo --version ${GLOO_VER} \
 --namespace gloo-system
 
 kubectl -n gloo-system rollout status deployment/gloo
-kubectl -n gloo-system rollout status deployment/gateway-proxy
+kubectl -n gloo-system rollout status deployment/gateway-proxy-v2
 kubectl -n gloo-system get all
 
 echo '>>> Installing Flagger'

@@ -2,7 +2,7 @@
 
 set -o errexit
 
-ISTIO_VER="1.2.2"
+ISTIO_VER="1.2.3"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

test/e2e-kubernetes-tests.sh (new executable file, 98 lines)

@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+# This script runs e2e tests for Blue/Green initialization, analysis and promotion
+# Prerequisites: Kubernetes Kind, Kustomize
+
+set -o errexit
+
+REPO_ROOT=$(git rev-parse --show-toplevel)
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+
+echo '>>> Creating test namespace'
+kubectl create namespace test
+
+echo '>>> Installing the load tester'
+kubectl apply -k ${REPO_ROOT}/kustomize/tester
+kubectl -n test rollout status deployment/flagger-loadtester
+
+echo '>>> Initialising canary'
+kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
+
+cat <<EOF | kubectl apply -f -
+apiVersion: flagger.app/v1alpha3
+kind: Canary
+metadata:
+  name: podinfo
+  namespace: test
+spec:
+  provider: kubernetes
+  targetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: podinfo
+  progressDeadlineSeconds: 60
+  service:
+    port: 9898
+    portDiscovery: true
+  canaryAnalysis:
+    interval: 15s
+    threshold: 10
+    iterations: 5
+    metrics:
+    - name: request-success-rate
+      threshold: 99
+      interval: 1m
+    - name: request-duration
+      threshold: 500
+      interval: 30s
+    webhooks:
+      - name: load-test
+        url: http://flagger-loadtester.test/
+        timeout: 5s
+        metadata:
+          type: cmd
+          cmd: "hey -z 10m -q 10 -c 2 http://podinfo-canary.test:9898/"
+          logCmdOutput: "true"
+EOF
+
+echo '>>> Waiting for primary to be ready'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
+    sleep 5
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n flagger-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
+echo '✔ Canary initialization test passed'
+
+echo '>>> Triggering canary deployment'
+kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.7.0
+
+echo '>>> Waiting for canary promotion'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test describe deployment/podinfo-primary | grep '1.7.0' && ok=true || ok=false
+    sleep 10
+    kubectl -n flagger-system logs deployment/flagger --tail 1
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n test describe deployment/podinfo
+        kubectl -n test describe deployment/podinfo-primary
+        kubectl -n flagger-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
+echo '✔ Canary promotion test passed'
+
+kubectl -n flagger-system logs deployment/flagger

test/e2e-kubernetes.sh (new executable file, 17 lines)

@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -o errexit
+
+REPO_ROOT=$(git rev-parse --show-toplevel)
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+
+echo '>>> Loading Flagger image'
+kind load docker-image test/flagger:latest
+
+echo '>>> Installing Flagger'
+kubectl apply -k ${REPO_ROOT}/kustomize/kubernetes
+
+kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
+
+kubectl -n flagger-system rollout status deployment/flagger
+kubectl -n flagger-system rollout status deployment/flagger-prometheus
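
Together these scripts mirror the new `e2e-kubernetes-testing` CircleCI job; run locally, the step order would be roughly:

```bash
# Build the Flagger image, create a kind cluster, install Flagger with the
# kubernetes overlay, then run the Blue/Green tests.
test/container-build.sh
test/e2e-kind.sh
test/e2e-kubernetes.sh
test/e2e-kubernetes-tests.sh
```
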

@@ -4,7 +4,7 @@ set -o errexit
 
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
-NGINX_VERSION=1.8.2
+NGINX_VERSION=1.12.1
 
 echo '>>> Installing NGINX Ingress'
 helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \

@@ -33,6 +33,7 @@ spec:
   progressDeadlineSeconds: 60
   service:
     port: 9898
+    portDiscovery: true
     headers:
       request:
         add:

@@ -142,6 +143,7 @@ spec:
     name: podinfo
   progressDeadlineSeconds: 60
   service:
+    portDiscovery: true
     port: 9898
   canaryAnalysis:
     interval: 10s

@@ -202,4 +204,4 @@ echo '✔ A/B testing promotion test passed'
 
 kubectl -n istio-system logs deployment/flagger
 
-echo '✔ All tests passed'
\ No newline at end of file
+echo '✔ All tests passed'

@@ -20,6 +20,7 @@ spec:
     metadata:
       annotations:
         prometheus.io/scrape: "true"
+        prometheus.io/port: "9797"
      labels:
        app: podinfo
    spec:

@@ -31,9 +32,13 @@ spec:
         - containerPort: 9898
           name: http
           protocol: TCP
+        - containerPort: 9797
+          name: http-prom
+          protocol: TCP
         command:
         - ./podinfo
         - --port=9898
+        - --port-metrics=9797
         - --level=info
         - --random-delay=false
         - --random-error=false
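
With the added annotation and `http-prom` port, Prometheus can scrape the workload on 9797. A quick manual check, assuming podinfo serves its metrics on that port:

```bash
# Port-forward the metrics port and peek at the scrape endpoint.
kubectl -n test port-forward deploy/podinfo 9797:9797 &
curl -s http://localhost:9797/metrics | head
```
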