Mirror of https://github.com/fluxcd/flagger.git (synced 2026-02-16 19:09:55 +00:00)

Compare commits (77 commits)
SHA1: 1902884b56, 98d2805267, 24a74d3589, 15463456ec, 752eceed4b, eadce34d6f, 11ccf34bbc, e308678ed5, cbe72f0aa2, bc84e1c154, 344bd45a0e, 72014f736f, 0a2949b6ad, 2ff695ecfe, 8d0b54e059, 121a65fad0, ecaa203091, 6d0e3c6468, c933476fff, 1335210cf5, 9d12794600, d57fc7d03e, 1f9f6fb55a, 948df55de3, 8914f26754, 79b3370892, a233b99f0b, 0d94c01678, 00151e92fe, f7db0210ea, cf3ba35fb9, 177dc824e3, 5f544b90d6, 921ac00383, 7df7218978, e4c6903a01, 027342dc72, e17a747785, e477b37bd0, ad25068375, c92230c109, 9e082d9ee3, cfd610ac55, 82067f13bf, 242d79e49d, 4f01ecde5a, 61141c7479, 62429ff710, 82a1f45cc1, 1a95fc2a9c, 13816eeafa, 5279f73c17, d196bb2856, 3f8f634a1b, 5ba27c898e, 57f1b63fa1, d69e203479, 4d7fae39a8, 2dc554c92a, 21c394ef7f, 2173bfc1a0, a19d016e14, 8f1b5df9e2, 2d6b8ecfdf, 8093612011, 39dc761e32, 0c68983c62, 37ebbf14f9, 156488c8d5, 68d1f583cc, aa24d6ff7e, 58c2c19f1e, 2a91149211, 868482c240, e5612bca50, d21fb1afe8, 89d0a533e2
```diff
@@ -1,6 +1,6 @@
 version: 2.1
 jobs:
-  e2e-testing:
+  e2e-istio-testing:
     machine: true
     steps:
       - checkout
@@ -9,14 +9,46 @@ jobs:
       - run: test/e2e-build.sh
       - run: test/e2e-tests.sh
 
+  e2e-supergloo-testing:
+    machine: true
+    steps:
+      - checkout
+      - run: test/e2e-kind.sh
+      - run: test/e2e-supergloo.sh
+      - run: test/e2e-build.sh supergloo:test.supergloo-system
+      - run: test/e2e-tests.sh canary
+
+  e2e-nginx-testing:
+    machine: true
+    steps:
+      - checkout
+      - run: test/e2e-kind.sh
+      - run: test/e2e-nginx.sh
+      - run: test/e2e-nginx-build.sh
+      - run: test/e2e-nginx-tests.sh
+
 workflows:
   version: 2
   build-and-test:
     jobs:
-      - e2e-testing:
+      - e2e-istio-testing:
          filters:
            branches:
              ignore:
                - /gh-pages.*/
                - /docs-.*/
                - /release-.*/
+      - e2e-supergloo-testing:
+         filters:
+           branches:
+             ignore:
+               - /gh-pages.*/
+               - /docs-.*/
+               - /release-.*/
+      - e2e-nginx-testing:
+         filters:
+           branches:
+             ignore:
+               - /gh-pages.*/
+               - /docs-.*/
+               - /release-.*/
```
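The three workflow entries above repeat identical branch filters. A YAML anchor could share one definition; this is only a sketch of a possible cleanup, not content from the commit range:

```yaml
workflows:
  version: 2
  build-and-test:
    jobs:
      - e2e-istio-testing:
          # define the filter once and alias it below
          filters: &ignore-doc-branches
            branches:
              ignore:
                - /gh-pages.*/
                - /docs-.*/
                - /release-.*/
      - e2e-supergloo-testing:
          filters: *ignore-doc-branches
      - e2e-nginx-testing:
          filters: *ignore-doc-branches
```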
CHANGELOG.md (41 lines changed)

```diff
@@ -2,6 +2,47 @@
 
 All notable changes to this project are documented in this file.
 
+## 0.13.2 (2019-04-11)
+
+Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)
+
+#### Fixes
+
+- Do not copy labels from canary to primary deployment [#178](https://github.com/weaveworks/flagger/pull/178)
+
+#### Improvements
+
+- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/weaveworks/flagger/pull/176)
+
+## 0.13.1 (2019-04-09)
+
+Fixes for custom metrics checks and NGINX Prometheus queries
+
+#### Fixes
+
+- Fix promql queries for custom checks and NGINX [#174](https://github.com/weaveworks/flagger/pull/174)
+
+## 0.13.0 (2019-04-08)
+
+Adds support for [NGINX](https://docs.flagger.app/usage/nginx-progressive-delivery) ingress controller
+
+#### Features
+
+- Add support for nginx ingress controller (weighted traffic and A/B testing) [#170](https://github.com/weaveworks/flagger/pull/170)
+- Add Prometheus add-on to Flagger Helm chart for App Mesh and NGINX [79b3370](https://github.com/weaveworks/flagger/pull/170/commits/79b337089294a92961bc8446fd185b38c50a32df)
+
+#### Fixes
+
+- Fix duplicate hosts Istio error when using wildcards [#162](https://github.com/weaveworks/flagger/pull/162)
+
+## 0.12.0 (2019-03-29)
+
+Adds support for [SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
+
+#### Features
+
+- Supergloo support for canary deployment (weighted traffic) [#151](https://github.com/weaveworks/flagger/pull/151)
+
 ## 0.11.1 (2019-03-18)
 
 Move Flagger and the load tester container images to Docker Hub
```
Gopkg.lock (generated, 609 lines changed)

```diff
@@ -6,16 +6,32 @@
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
   pruneopts = "NUT"
-  revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
-  version = "v0.36.0"
+  revision = "fcb9a2d5f791d07be64506ab54434de65989d370"
+  version = "v0.37.4"
 
+[[projects]]
+  branch = "master"
+  digest = "1:f12358576cd79bba0ae626530d23cde63416744f486c8bc817802c6907eaadd7"
+  name = "github.com/armon/go-metrics"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "f0300d1749da6fa982027e449ec0c7a145510c3c"
+
+[[projects]]
+  digest = "1:13d5750ba049ce46bf931792803f1d5584b04026df9badea5931e33c22aa34ee"
+  name = "github.com/avast/retry-go"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "08d411bf8302219fe47ca04dbdf9de892010c5e5"
+  version = "v2.2.0"
+
 [[projects]]
   digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
   name = "github.com/beorn7/perks"
   packages = ["quantile"]
   pruneopts = "NUT"
-  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+  revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46"
+  version = "v1.0.0"
 
 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
@@ -25,6 +41,14 @@
   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
   version = "v1.1.1"
 
+[[projects]]
+  digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
+  name = "github.com/evanphx/json-patch"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
+  version = "v4.1.0"
+
 [[projects]]
   digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
   name = "github.com/ghodss/yaml"
@@ -34,25 +58,20 @@
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2"
+  digest = "1:895d2773c9e78e595dd5f946a25383d579d3094a9d8d9306dba27359f190f275"
   name = "github.com/gogo/protobuf"
   packages = [
     "gogoproto",
+    "jsonpb",
     "proto",
+    "protoc-gen-gogo/descriptor",
     "sortkeys",
     "types",
   ]
   pruneopts = "NUT"
   revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
   version = "v1.2.1"
 
-[[projects]]
-  branch = "master"
-  digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
-  name = "github.com/golang/glog"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
-  source = "github.com/istio/glog"
-
 [[projects]]
   branch = "master"
   digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
@@ -62,26 +81,35 @@
   revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
 
 [[projects]]
-  digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
+  digest = "1:a98a0b00720dc3149bf3d0c8d5726188899e5bab2f5072b9a7ef82958fbc98b2"
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
+    "protoc-gen-go/descriptor",
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
     "ptypes/timestamp",
   ]
   pruneopts = "NUT"
-  revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f"
-  version = "v1.3.0"
+  revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+  version = "v1.3.1"
 
+[[projects]]
+  digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe"
+  name = "github.com/golang/snappy"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
+  version = "v0.0.1"
+
 [[projects]]
-  branch = "master"
   digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
   name = "github.com/google/btree"
   packages = ["."]
   pruneopts = "NUT"
   revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
+  version = "v1.0.0"
 
 [[projects]]
   digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
@@ -98,12 +126,12 @@
   version = "v0.2.0"
 
 [[projects]]
-  branch = "master"
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
+  revision = "f140a6486e521aad38f5917de355cbf147cc0496"
+  version = "v1.0.0"
 
 [[projects]]
   digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
@@ -136,6 +164,70 @@
   revision = "b4df798d65426f8c8ab5ca5f9987aec5575d26c9"
   version = "v2.0.1"
 
+[[projects]]
+  digest = "1:adf097b949dbc1e452fbad15322c78651f6e7accb4661dffa38fed30273c5966"
+  name = "github.com/hashicorp/consul"
+  packages = ["api"]
+  pruneopts = "NUT"
+  revision = "ea5210a30e154f4da9a4c8e729b45b8ce7b9b92c"
+  version = "v1.4.4"
+
+[[projects]]
+  digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19"
+  name = "github.com/hashicorp/errwrap"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:fff05cb0c34d2decaeb27bb6ab6b73a6947c3009d725160070da55f9511fd410"
+  name = "github.com/hashicorp/go-cleanhttp"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "eda1e5db218aad1db63ca4642c8906b26bcf2744"
+  version = "v0.5.1"
+
+[[projects]]
+  digest = "1:1cf16b098a70d6c02899608abbb567296d11c7b830635014dfe6124a02dc1369"
+  name = "github.com/hashicorp/go-immutable-radix"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:2ed138049ab373f696db2081ca48f15c5abdf20893803612a284f2bdce2bf443"
+  name = "github.com/hashicorp/go-multierror"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:b3496707ba69dd873a870238644aa8ac259ee67fc4fd05caf37b608e7053e1f7"
+  name = "github.com/hashicorp/go-retryablehttp"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "357460732517ec3b57c05c51443296bdd6df1874"
+  version = "v0.5.3"
+
+[[projects]]
+  digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423"
+  name = "github.com/hashicorp/go-rootcerts"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "63503fb4e1eca22f9ae0f90b49c5d5538a0e87eb"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:6c69626c7aacae1e573084cdb6ed55713094ba56263f687e5d1750053bd08598"
+  name = "github.com/hashicorp/go-sockaddr"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c7188e74f6acae5a989bdc959aa779f8b9f42faf"
+  version = "v1.0.2"
+
 [[projects]]
   digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129"
   name = "github.com/hashicorp/golang-lru"
@@ -147,6 +239,48 @@
   revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
   version = "v0.5.1"
 
+[[projects]]
+  digest = "1:39f543569bf189e228c84a294c50aca8ea56c82b3d9df5c9b788249907d7049a"
+  name = "github.com/hashicorp/hcl"
+  packages = [
+    ".",
+    "hcl/ast",
+    "hcl/parser",
+    "hcl/scanner",
+    "hcl/strconv",
+    "hcl/token",
+    "json/parser",
+    "json/scanner",
+    "json/token",
+  ]
+  pruneopts = "NUT"
+  revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:acc81e4e4289587b257ccdfccbc6eaf16d4c2fb57dda73c6bb349bf50f02501f"
+  name = "github.com/hashicorp/serf"
+  packages = ["coordinate"]
+  pruneopts = "NUT"
+  revision = "15cfd05de3dffb3664aa37b06e91f970b825e380"
+  version = "v0.8.3"
+
+[[projects]]
+  digest = "1:cded54cacfb6fdc86b916031e4113cbc50dfb55e92535651733604f1e3a8ce59"
+  name = "github.com/hashicorp/vault"
+  packages = [
+    "api",
+    "helper/compressutil",
+    "helper/consts",
+    "helper/hclutil",
+    "helper/jsonutil",
+    "helper/parseutil",
+    "helper/strutil",
+  ]
+  pruneopts = "NUT"
+  revision = "36aa8c8dd1936e10ebd7a4c1d412ae0e6f7900bd"
+  version = "v1.1.0"
+
 [[projects]]
   digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
   name = "github.com/imdario/mergo"
@@ -156,19 +290,55 @@
   version = "v0.3.7"
 
+[[projects]]
+  branch = "master"
+  digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
+  name = "github.com/istio/glog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
+
 [[projects]]
-  digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
+  digest = "1:4e903242fe176238aaa469f59d7035f5abf2aa9acfefb8964ddd203651b574e9"
   name = "github.com/json-iterator/go"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
+  revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
+  version = "v1.1.6"
 
+[[projects]]
+  digest = "1:2760a8fe9b7bcc95c397bc85b69bc7a11eed03c644b45e8c00c581c114486d3f"
+  name = "github.com/k0kubun/pp"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "3d73dea227e0711e38b911ffa6fbafc8ff6b2991"
+  version = "v3.0.1"
+
+[[projects]]
+  digest = "1:493282a1185f77368678d3886b7e999e37e920d22f69669545f1ee5ae10743a2"
+  name = "github.com/linkerd/linkerd2"
+  packages = [
+    "controller/gen/apis/serviceprofile",
+    "controller/gen/apis/serviceprofile/v1alpha1",
+  ]
+  pruneopts = "NUT"
+  revision = "5e47cb150a33150e5aeddc6672d8a64701a970de"
+  version = "stable-2.2.1"
+
+[[projects]]
+  digest = "1:0dbba7d4d4f3eeb01acd81af338ff4a3c4b0bb814d87368ea536e616f383240d"
+  name = "github.com/lyft/protoc-gen-validate"
+  packages = ["validate"]
+  pruneopts = "NUT"
+  revision = "ff6f7a9bc2e5fe006509b9f8c7594c41a953d50f"
+  version = "v0.0.14"
+
+[[projects]]
+  digest = "1:9785a54031460a402fab4e4bbb3124c8dd9e9f7b1982109fef605cb91632d480"
+  name = "github.com/mattn/go-colorable"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "3a70a971f94a22f2fa562ffcc7a0eb45f5daf045"
+  version = "v0.1.1"
+
+[[projects]]
+  digest = "1:85edcc76fa95b8b312642905b56284f4fe5c42d8becb219481adba7e97d4f5c5"
+  name = "github.com/mattn/go-isatty"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c2a7a6ca930a4cd0bc33a3f298eb71960732a3a7"
+  version = "v0.0.7"
+
 [[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@@ -178,6 +348,30 @@
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   version = "v1.0.1"
 
+[[projects]]
+  digest = "1:f9f72e583aaacf1d1ac5d6121abd4afd3c690baa9e14e1d009df26bf831ba347"
+  name = "github.com/mitchellh/go-homedir"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:e34decedbcec12332c5836d16a6838f864e0b43c5b4f9aa9d9a85101015f87c2"
+  name = "github.com/mitchellh/hashstructure"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "a38c50148365edc8df43c1580c48fb2b3a1e9cd7"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
+  name = "github.com/mitchellh/mapstructure"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
+  version = "v1.1.2"
+
 [[projects]]
   digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
   name = "github.com/modern-go/concurrent"
@@ -210,6 +404,25 @@
   revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
   version = "v2.0.1"
 
+[[projects]]
+  digest = "1:122724025b9505074138089f78f543f643ae3a8fab6d5b9edf72cce4dd49cc91"
+  name = "github.com/pierrec/lz4"
+  packages = [
+    ".",
+    "internal/xxh32",
+  ]
+  pruneopts = "NUT"
+  revision = "315a67e90e415bcdaff33057da191569bf4d8479"
+  version = "v2.1.1"
+
+[[projects]]
+  digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+  version = "v0.8.1"
+
 [[projects]]
   digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
   name = "github.com/prometheus/client_golang"
@@ -238,22 +451,118 @@
     "model",
   ]
   pruneopts = "NUT"
-  revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
-  version = "v0.2.0"
+  revision = "a82f4c12f983cc2649298185f296632953e50d3e"
+  version = "v0.3.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:0a2e604afa3cbf53a1ddade2f240ee8472eded98856dd8c7cfbfea392ddbbfc7"
+  digest = "1:7813f698f171bd7132b123364433e1b0362f7fdb4ed7f4a20df595a4c2410f8a"
   name = "github.com/prometheus/procfs"
-  packages = ["."]
+  packages = [
+    ".",
+    "internal/util",
+    "iostats",
+    "nfs",
+    "xfs",
+  ]
   pruneopts = "NUT"
-  revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02"
+  revision = "8368d24ba045f26503eb745b624d930cbe214c79"
 
+[[projects]]
+  digest = "1:38969f56c08bdf302a73a7c8adb0520dc9cb4cd54206cbe7c8a147da52cc0890"
+  name = "github.com/radovskyb/watcher"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "d8b41ca2397a9b5cfc26adb10edbbcde40187a87"
+  version = "v1.0.6"
+
+[[projects]]
+  digest = "1:09d61699d553a4e6ec998ad29816177b1f3d3ed0c18fe923d2c174ec065c99c8"
+  name = "github.com/ryanuber/go-glob"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:cad7db5ed31bef1f9e4429ad0927b40dbf31535167ec4c768fd5985565111ea5"
+  name = "github.com/solo-io/gloo"
+  packages = [
+    "projects/gloo/pkg/api/v1",
+    "projects/gloo/pkg/api/v1/plugins",
+    "projects/gloo/pkg/api/v1/plugins/aws",
+    "projects/gloo/pkg/api/v1/plugins/azure",
+    "projects/gloo/pkg/api/v1/plugins/consul",
+    "projects/gloo/pkg/api/v1/plugins/faultinjection",
+    "projects/gloo/pkg/api/v1/plugins/grpc",
+    "projects/gloo/pkg/api/v1/plugins/grpc_web",
+    "projects/gloo/pkg/api/v1/plugins/hcm",
+    "projects/gloo/pkg/api/v1/plugins/kubernetes",
+    "projects/gloo/pkg/api/v1/plugins/rest",
+    "projects/gloo/pkg/api/v1/plugins/retries",
+    "projects/gloo/pkg/api/v1/plugins/static",
+    "projects/gloo/pkg/api/v1/plugins/transformation",
+  ]
+  pruneopts = "NUT"
+  revision = "f767e64f7ee60139ff79e2abb547cd149067da04"
+  version = "v0.13.17"
+
+[[projects]]
+  digest = "1:4a5b267a6929e4c3980066d745f87993c118f7797373a43537fdc69b3ee2d37e"
+  name = "github.com/solo-io/go-utils"
+  packages = [
+    "contextutils",
+    "errors",
+    "kubeutils",
+  ]
+  pruneopts = "NUT"
+  revision = "a27432d89f419897df796a17456410e49a9727c3"
+  version = "v0.7.11"
+
+[[projects]]
+  digest = "1:1cc9a8be450b7d9e77f100c7662133bf7bc8f0832f4eed3542599d3f35d28c46"
+  name = "github.com/solo-io/solo-kit"
+  packages = [
+    "pkg/api/v1/clients",
+    "pkg/api/v1/clients/configmap",
+    "pkg/api/v1/clients/consul",
+    "pkg/api/v1/clients/factory",
+    "pkg/api/v1/clients/file",
+    "pkg/api/v1/clients/kube",
+    "pkg/api/v1/clients/kube/cache",
+    "pkg/api/v1/clients/kube/controller",
+    "pkg/api/v1/clients/kube/crd",
+    "pkg/api/v1/clients/kube/crd/client/clientset/versioned",
+    "pkg/api/v1/clients/kube/crd/client/clientset/versioned/scheme",
+    "pkg/api/v1/clients/kube/crd/client/clientset/versioned/typed/solo.io/v1",
+    "pkg/api/v1/clients/kube/crd/solo.io/v1",
+    "pkg/api/v1/clients/kubesecret",
+    "pkg/api/v1/clients/memory",
+    "pkg/api/v1/clients/vault",
+    "pkg/api/v1/eventloop",
+    "pkg/api/v1/reconcile",
+    "pkg/api/v1/resources",
+    "pkg/api/v1/resources/core",
+    "pkg/errors",
+    "pkg/utils/errutils",
+    "pkg/utils/fileutils",
+    "pkg/utils/hashutils",
+    "pkg/utils/kubeutils",
+    "pkg/utils/log",
+    "pkg/utils/protoutils",
+    "pkg/utils/stringutils",
+  ]
+  pruneopts = "NUT"
+  revision = "ab46647c2845a4830d09db3690b3ace1b06845cd"
+  version = "v0.6.3"
+
+[[projects]]
+  digest = "1:ba6f00e510774b2f1099d2f39a2ae36796ddbe406b02703b3395f26deb8d0f2c"
+  name = "github.com/solo-io/supergloo"
+  packages = [
+    "api/custom/kubepod",
+    "api/custom/linkerd",
+    "pkg/api/external/istio/authorization/v1alpha1",
+    "pkg/api/external/istio/networking/v1alpha3",
+    "pkg/api/v1",
+  ]
+  pruneopts = "NUT"
+  revision = "cb84ba5d7bd1099c5e52c09fc9229d1ee0fed9f9"
+  version = "v0.3.11"
+
 [[projects]]
   digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
@@ -263,6 +572,36 @@
   revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
   version = "v1.0.3"
 
+[[projects]]
+  branch = "master"
+  digest = "1:755d83f10748295646cf74cd19611ebffad37807e49632feb8e3f47d43210c3d"
+  name = "github.com/stefanprodan/klog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "9cbb78b20423182f9e5b2a214dd255f5e117d2d1"
+
+[[projects]]
+  digest = "1:1349e632a9915b7075f74c13474bfcae2594750c390d3c0b236e48bf6bce3fa2"
+  name = "go.opencensus.io"
+  packages = [
+    ".",
+    "internal",
+    "internal/tagencoding",
+    "metric/metricdata",
+    "metric/metricproducer",
+    "resource",
+    "stats",
+    "stats/internal",
+    "stats/view",
+    "tag",
+    "trace",
+    "trace/internal",
+    "trace/tracestate",
+  ]
+  pruneopts = "NUT"
+  revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38"
+  version = "v0.20.2"
+
 [[projects]]
   digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
   name = "go.uber.org/atomic"
@@ -296,15 +635,15 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:058e9504b9a79bfe86092974d05bb3298d2aa0c312d266d43148de289a5065d9"
+  digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
   pruneopts = "NUT"
-  revision = "8dd112bcdc25174059e45e07517d9fc663123347"
+  revision = "b43e412143f90fca62516c457cae5a8dc1595586"
 
 [[projects]]
   branch = "master"
-  digest = "1:e3477b53a5c2fb71a7c9688e9b3d58be702807a5a88def8b9a327259d46e4979"
+  digest = "1:c86c292c268416012ab237c22c2c69fdd04cb891815b40343e75210472198455"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -315,11 +654,11 @@
     "idna",
   ]
   pruneopts = "NUT"
-  revision = "16b79f2e4e95ea23b2bf9903c9809ff7b013ce85"
+  revision = "1da14a5a36f220ea3f03470682b737b1dfd5de22"
 
 [[projects]]
   branch = "master"
-  digest = "1:17ee74a4d9b6078611784b873cdbfe91892d2c73052c430724e66fcc015b6c7b"
+  digest = "1:3121d742fbe48670a16d98b6da4693501fc33cd76d69ed6f35850c564f255c65"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
@@ -329,18 +668,18 @@
     "jwt",
   ]
   pruneopts = "NUT"
-  revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3"
+  revision = "9f3314589c9a9136388751d9adae6b0ed400978a"
 
 [[projects]]
   branch = "master"
-  digest = "1:a0d91ab4d23badd4e64e115c6e6ba7dd56bd3cde5d287845822fb2599ac10236"
+  digest = "1:bd7da85408c51d6ab079e1acc5a2872fdfbea42e845b8bbb538c3fac6ef43d2a"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "30e92a19ae4a77dde818b8c3d41d51e4850cba12"
+  revision = "f0ce4c0180bef7e9c51babed693a6e47fdd8962f"
 
 [[projects]]
   digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -371,16 +710,15 @@
   name = "golang.org/x/time"
   packages = ["rate"]
   pruneopts = "NUT"
-  revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
+  revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef"
 
 [[projects]]
   branch = "master"
-  digest = "1:e46d8e20161401a9cf8765dfa428494a3492a0b56fe114156b7da792bf41ba78"
+  digest = "1:be1ab6d2b333b1d487c01f1328aef9dc76cee4ff4f780775a552d2a1653f0207"
   name = "golang.org/x/tools"
   packages = [
-    "go/ast/astutil",
     "go/gcexportdata",
     "go/internal/cgo",
     "go/internal/gcimporter",
     "go/internal/packagesdriver",
     "go/packages",
@@ -392,10 +730,10 @@
     "internal/semver",
   ]
   pruneopts = "NUT"
-  revision = "f8c04913dfb7b2339a756441456bdbe0af6eb508"
+  revision = "6732636ccdfd99c4301d1d1ac2307f091331f767"
 
 [[projects]]
-  digest = "1:d395d49d784dd3a11938a3e85091b6570664aa90ff2767a626565c6c130fa7e9"
+  digest = "1:a4824d8df1fd1f63c6b3690bf4801d6ff1722adcb3e13c0489196a7e248d868a"
   name = "google.golang.org/appengine"
   packages = [
     ".",
@@ -410,8 +748,8 @@
     "urlfetch",
   ]
   pruneopts = "NUT"
-  revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
-  version = "v1.4.0"
+  revision = "54a98f90d1c46b7731eb8fb305d2a321c30ef610"
+  version = "v1.5.0"
 
 [[projects]]
   digest = "1:fe9eb931d7b59027c4a3467f7edc16cc8552dac5328039bec05045143c18e1ce"
@@ -438,7 +776,7 @@
   version = "v2.2.2"
 
 [[projects]]
-  digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
+  digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
   name = "k8s.io/api"
   packages = [
     "admissionregistration/v1alpha1",
@@ -446,16 +784,19 @@
     "apps/v1",
     "apps/v1beta1",
     "apps/v1beta2",
+    "auditregistration/v1alpha1",
     "authentication/v1",
     "authentication/v1beta1",
     "authorization/v1",
     "authorization/v1beta1",
     "autoscaling/v1",
     "autoscaling/v2beta1",
+    "autoscaling/v2beta2",
     "batch/v1",
     "batch/v1beta1",
     "batch/v2alpha1",
     "certificates/v1beta1",
+    "coordination/v1beta1",
     "core/v1",
     "events/v1beta1",
     "extensions/v1beta1",
@@ -472,11 +813,25 @@
     "storage/v1beta1",
   ]
   pruneopts = "NUT"
-  revision = "072894a440bdee3a891dea811fe42902311cd2a3"
-  version = "kubernetes-1.11.0"
+  revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
+  version = "kubernetes-1.13.1"
 
 [[projects]]
-  digest = "1:83b01e3d6f85c4e911de84febd69a2d3ece614c5a4a518fbc2b5d59000645980"
+  digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c"
   name = "k8s.io/apiextensions-apiserver"
   packages = [
     "pkg/apis/apiextensions",
     "pkg/apis/apiextensions/v1beta1",
+    "pkg/client/clientset/clientset",
+    "pkg/client/clientset/clientset/scheme",
+    "pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
   ]
   pruneopts = "NUT"
   revision = "0fe22c71c47604641d9aa352c785b7912c200562"
   version = "kubernetes-1.13.1"
 
 [[projects]]
   digest = "1:5ac33dce66ac11d4f41c157be7f13ba30c968c74d25a3a3a0a1eddf44b6b2176"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/errors",
@@ -508,6 +863,7 @@
     "pkg/util/intstr",
     "pkg/util/json",
     "pkg/util/mergepatch",
+    "pkg/util/naming",
     "pkg/util/net",
     "pkg/util/runtime",
     "pkg/util/sets",
@@ -523,15 +879,61 @@
     "third_party/forked/golang/reflect",
   ]
   pruneopts = "NUT"
-  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
-  version = "kubernetes-1.11.0"
+  revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
+  version = "kubernetes-1.13.1"
 
 [[projects]]
-  digest = "1:c7d6cf5e28c377ab4000b94b6b9ff562c4b13e7e8b948ad943f133c5104be011"
+  digest = "1:ef9bda0e29102ac26750517500a2cb0bd7be69ba21ad267ab89a0b35d035328b"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
     "discovery/fake",
     "informers",
     "informers/admissionregistration",
     "informers/admissionregistration/v1alpha1",
     "informers/admissionregistration/v1beta1",
     "informers/apps",
     "informers/apps/v1",
     "informers/apps/v1beta1",
     "informers/apps/v1beta2",
+    "informers/auditregistration",
+    "informers/auditregistration/v1alpha1",
     "informers/autoscaling",
     "informers/autoscaling/v1",
     "informers/autoscaling/v2beta1",
+    "informers/autoscaling/v2beta2",
     "informers/batch",
     "informers/batch/v1",
     "informers/batch/v1beta1",
     "informers/batch/v2alpha1",
     "informers/certificates",
     "informers/certificates/v1beta1",
+    "informers/coordination",
+    "informers/coordination/v1beta1",
     "informers/core",
     "informers/core/v1",
     "informers/events",
     "informers/events/v1beta1",
     "informers/extensions",
     "informers/extensions/v1beta1",
     "informers/internalinterfaces",
     "informers/networking",
     "informers/networking/v1",
     "informers/policy",
     "informers/policy/v1beta1",
     "informers/rbac",
     "informers/rbac/v1",
     "informers/rbac/v1alpha1",
     "informers/rbac/v1beta1",
     "informers/scheduling",
     "informers/scheduling/v1alpha1",
     "informers/scheduling/v1beta1",
     "informers/settings",
     "informers/settings/v1alpha1",
     "informers/storage",
     "informers/storage/v1",
     "informers/storage/v1alpha1",
     "informers/storage/v1beta1",
     "kubernetes",
     "kubernetes/fake",
     "kubernetes/scheme",
@@ -545,6 +947,8 @@
     "kubernetes/typed/apps/v1beta1/fake",
     "kubernetes/typed/apps/v1beta2",
     "kubernetes/typed/apps/v1beta2/fake",
+    "kubernetes/typed/auditregistration/v1alpha1",
+    "kubernetes/typed/auditregistration/v1alpha1/fake",
     "kubernetes/typed/authentication/v1",
     "kubernetes/typed/authentication/v1/fake",
     "kubernetes/typed/authentication/v1beta1",
@@ -557,6 +961,8 @@
     "kubernetes/typed/autoscaling/v1/fake",
     "kubernetes/typed/autoscaling/v2beta1",
     "kubernetes/typed/autoscaling/v2beta1/fake",
+    "kubernetes/typed/autoscaling/v2beta2",
+    "kubernetes/typed/autoscaling/v2beta2/fake",
     "kubernetes/typed/batch/v1",
     "kubernetes/typed/batch/v1/fake",
     "kubernetes/typed/batch/v1beta1",
@@ -565,6 +971,8 @@
     "kubernetes/typed/batch/v2alpha1/fake",
     "kubernetes/typed/certificates/v1beta1",
     "kubernetes/typed/certificates/v1beta1/fake",
+    "kubernetes/typed/coordination/v1beta1",
+    "kubernetes/typed/coordination/v1beta1/fake",
     "kubernetes/typed/core/v1",
     "kubernetes/typed/core/v1/fake",
     "kubernetes/typed/events/v1beta1",
@@ -593,6 +1001,34 @@
     "kubernetes/typed/storage/v1alpha1/fake",
     "kubernetes/typed/storage/v1beta1",
     "kubernetes/typed/storage/v1beta1/fake",
+    "listers/admissionregistration/v1alpha1",
+    "listers/admissionregistration/v1beta1",
+    "listers/apps/v1",
+    "listers/apps/v1beta1",
+    "listers/apps/v1beta2",
+    "listers/auditregistration/v1alpha1",
+    "listers/autoscaling/v1",
+    "listers/autoscaling/v2beta1",
+    "listers/autoscaling/v2beta2",
+    "listers/batch/v1",
+    "listers/batch/v1beta1",
+    "listers/batch/v2alpha1",
+    "listers/certificates/v1beta1",
+    "listers/coordination/v1beta1",
+    "listers/core/v1",
+    "listers/events/v1beta1",
+    "listers/extensions/v1beta1",
+    "listers/networking/v1",
+    "listers/policy/v1beta1",
+    "listers/rbac/v1",
+    "listers/rbac/v1alpha1",
+    "listers/rbac/v1beta1",
+    "listers/scheduling/v1alpha1",
+    "listers/scheduling/v1beta1",
+    "listers/settings/v1alpha1",
+    "listers/storage/v1",
+    "listers/storage/v1alpha1",
+    "listers/storage/v1beta1",
     "pkg/apis/clientauthentication",
     "pkg/apis/clientauthentication/v1alpha1",
     "pkg/apis/clientauthentication/v1beta1",
@@ -625,11 +1061,11 @@
     "util/workqueue",
   ]
   pruneopts = "NUT"
-  revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
-  version = "kubernetes-1.11.0"
+  revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
+  version = "kubernetes-1.13.1"
 
 [[projects]]
-  digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
+  digest = "1:dc1ae99dcab96913d81ae970b1f7a7411a54199b14bfb17a7e86f9a56979c720"
   name = "k8s.io/code-generator"
   packages = [
     "cmd/client-gen",
@@ -653,12 +1089,12 @@
     "pkg/util",
   ]
   pruneopts = "T"
-  revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
-  version = "kubernetes-1.11.0"
+  revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
+  version = "kubernetes-1.13.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:61024ed77a53ac618effed55043bf6a9afbdeb64136bd6a5b0c992d4c0363766"
+  digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15"
   name = "k8s.io/gengo"
   packages = [
     "args",
@@ -671,23 +1107,32 @@
     "types",
   ]
   pruneopts = "NUT"
-  revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
+  revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985"
 
 [[projects]]
-  digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a"
+  branch = "master"
+  digest = "1:755d83f10748295646cf74cd19611ebffad37807e49632feb8e3f47d43210c3d"
   name = "k8s.io/klog"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
-  version = "v0.2.0"
+  revision = "9cbb78b20423182f9e5b2a214dd255f5e117d2d1"
+  source = "github.com/stefanprodan/klog"
 
 [[projects]]
   branch = "master"
-  digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
+  digest = "1:42674e29bf0cf4662d49bd9528e24b9ecc4895b32d0be281f9cf04d3a7671846"
   name = "k8s.io/kube-openapi"
   packages = ["pkg/util/proto"]
   pruneopts = "NUT"
-  revision = "b3a7cee44a305be0a69e1b9ac03018307287e1b0"
+  revision = "6b3d3b2d5666c5912bab8b7bf26bf50f75a8f887"
 
 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
   name = "sigs.k8s.io/yaml"
   packages = ["."]
   pruneopts = "NUT"
   revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
   version = "v1.1.0"
 
 [solve-meta]
   analyzer-name = "dep"
@@ -695,9 +1140,17 @@
   input-imports = [
     "github.com/google/go-cmp/cmp",
     "github.com/google/go-cmp/cmp/cmpopts",
+    "github.com/istio/glog",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
+    "github.com/solo-io/gloo/projects/gloo/pkg/api/v1",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory",
+    "github.com/solo-io/solo-kit/pkg/api/v1/resources/core",
+    "github.com/solo-io/solo-kit/pkg/errors",
+    "github.com/solo-io/supergloo/pkg/api/v1",
+    "github.com/stefanprodan/klog",
     "go.uber.org/zap",
     "go.uber.org/zap/zapcore",
     "gopkg.in/h2non/gock.v1",
```
Gopkg.toml (30 lines changed)

```diff
@@ -21,25 +21,27 @@ required = [
 
 [[override]]
   name = "k8s.io/api"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"
 
 [[override]]
   name = "k8s.io/apimachinery"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"
 
 [[override]]
   name = "k8s.io/code-generator"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"
 
 [[override]]
   name = "k8s.io/client-go"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"
 
 [[override]]
-  name = "github.com/json-iterator/go"
-  # This is the commit at which k8s depends on this in 1.11
-  # It seems to be broken at HEAD.
-  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
+  name = "k8s.io/apiextensions-apiserver"
+  version = "kubernetes-1.13.1"
+
+[[override]]
+  name = "k8s.io/apiserver"
+  version = "kubernetes-1.13.1"
 
 [[constraint]]
   name = "github.com/prometheus/client_golang"
@@ -50,8 +52,8 @@ required = [
   version = "v0.2.0"
 
 [[override]]
-  name = "github.com/golang/glog"
-  source = "github.com/istio/glog"
+  name = "k8s.io/klog"
+  source = "github.com/stefanprodan/klog"
 
 [prune]
   go-tests = true
@@ -62,3 +64,11 @@ required = [
   name = "k8s.io/code-generator"
   unused-packages = false
   non-go = false
+
+[[constraint]]
+  name = "github.com/solo-io/supergloo"
+  version = "v0.3.11"
+
+[[constraint]]
+  name = "github.com/solo-io/solo-kit"
+  version = "v0.6.3"
```
Makefile (11 lines changed)

```diff
@@ -4,6 +4,7 @@ VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4
 PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
 SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
+LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
 TS=$(shell date +%Y-%m-%d_%H-%M-%S)
 
 run:
 	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
@@ -17,12 +18,18 @@ run-appmesh:
 	-slack-url=https://hooks.slack.com/services/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 \
 	-slack-channel="devops-alerts"
 
+run-nginx:
+	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=nginx -namespace=nginx \
+	-metrics-server=http://prometheus-weave.istio.weavedx.com \
+	-slack-url=https://hooks.slack.com/services/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 \
+	-slack-channel="devops-alerts"
+
 build:
 	docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile
 
 push:
 	docker tag weaveworks/flagger:$(TAG) quay.io/weaveworks/flagger:$(VERSION)
 	docker push quay.io/weaveworks/flagger:$(VERSION)
 	docker tag weaveworks/flagger:$(TAG) weaveworks/flagger:$(VERSION)
 	docker push weaveworks/flagger:$(VERSION)
 
 fmt:
 	gofmt -l -s -w $(SOURCE_DIRS)
```
README.md (26 lines changed)

```diff
@@ -7,7 +7,7 @@
 [](https://github.com/weaveworks/flagger/releases)
 
 Flagger is a Kubernetes operator that automates the promotion of canary deployments
-using Istio or App Mesh routing for traffic shifting and Prometheus metrics for canary analysis.
+using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
 The canary analysis can be extended with webhooks for running acceptance tests,
 load tests or any other custom validation.
 
@@ -25,6 +25,7 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
 * [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
 * [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
 * [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
+* [Flagger install with SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
 * How it works
   * [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
   * [Routing](https://docs.flagger.app/how-it-works#istio-routing)
@@ -38,6 +39,7 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
 * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
 * [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
 * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
+* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
 * [Monitoring](https://docs.flagger.app/usage/monitoring)
 * [Alerting](https://docs.flagger.app/usage/alerting)
 * Tutorials
@@ -152,20 +154,20 @@ For more details on how the canary analysis and promotion works please [read the
 
 ## Features
 
-| Feature                                      | Istio              | App Mesh           |
-| -------------------------------------------- | ------------------ | ------------------ |
-| Canary deployments (weighted traffic)        | :heavy_check_mark: | :heavy_check_mark: |
-| A/B testing (headers and cookies filters)    | :heavy_check_mark: | :heavy_minus_sign: |
-| Load testing                                 | :heavy_check_mark: | :heavy_check_mark: |
-| Webhooks (custom acceptance tests)           | :heavy_check_mark: | :heavy_check_mark: |
-| Request success rate check (Envoy metric)    | :heavy_check_mark: | :heavy_check_mark: |
-| Request duration check (Envoy metric)        | :heavy_check_mark: | :heavy_minus_sign: |
-| Custom promql checks                         | :heavy_check_mark: | :heavy_check_mark: |
-| Ingress gateway (CORS, retries and timeouts) | :heavy_check_mark: | :heavy_minus_sign: |
+| Feature                                      | Istio              | App Mesh           | SuperGloo          | NGINX Ingress      |
+| -------------------------------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
+| Canary deployments (weighted traffic)        | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| A/B testing (headers and cookies filters)    | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
+| Load testing                                 | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Webhooks (custom acceptance tests)           | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Request success rate check (L7 metric)       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Request duration check (L7 metric)           | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
+| Custom promql checks                         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Ingress gateway (CORS, retries and timeouts) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
 
 ## Roadmap
 
-* Integrate with other service mesh technologies like Linkerd v2, Super Gloo or Consul Mesh
+* Integrate with other service mesh technologies like Linkerd v2
 * Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two
 
 ## Contributing
```
```diff
@@ -31,6 +31,12 @@ rules:
   resources:
     - horizontalpodautoscalers
   verbs: ["*"]
+- apiGroups:
+    - "extensions"
+  resources:
+    - ingresses
+    - ingresses/status
+  verbs: ["*"]
 - apiGroups:
     - flagger.app
   resources:
```
```diff
@@ -69,6 +69,18 @@ spec:
               type: string
             name:
               type: string
+          ingressRef:
+            anyOf:
+              - type: string
+              - type: object
+            required: ['apiVersion', 'kind', 'name']
+            properties:
+              apiVersion:
+                type: string
+              kind:
+                type: string
+              name:
+                type: string
           service:
             type: object
             required: ['port']
```
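For reference, the object form that this new ingressRef schema accepts is exercised by artifacts/nginx/canary.yaml later in this compare:

```yaml
ingressRef:
  apiVersion: extensions/v1beta1
  kind: Ingress
  name: podinfo
```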
```diff
@@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
         - name: flagger
-          image: weaveworks/flagger:0.11.1
+          image: weaveworks/flagger:0.13.2
           imagePullPolicy: IfNotPresent
           ports:
             - name: http
@@ -31,6 +31,7 @@ spec:
             - ./flagger
             - -log-level=info
             - -control-loop-interval=10s
+            - -mesh-provider=$(MESH_PROVIDER)
             - -metrics-server=http://prometheus.istio-system.svc.cluster.local:9090
           livenessProbe:
             exec:
```
```diff
@@ -57,11 +57,11 @@ spec:
           securityContext:
             readOnlyRootFilesystem: true
             runAsUser: 10001
-          volumeMounts:
-            - name: tests
-              mountPath: /bats
-              readOnly: true
-      volumes:
-        - name: tests
-          configMap:
-            name: flagger-loadtester-bats
+#          volumeMounts:
+#            - name: tests
+#              mountPath: /bats
+#              readOnly: true
+#      volumes:
+#        - name: tests
+#          configMap:
+#            name: flagger-loadtester-bats
```
artifacts/nginx/canary.yaml (new file, 68 lines)

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # ingress reference
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  service:
    # container port
    port: 9898
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # NGINX Prometheus checks
    metrics:
      - name: request-success-rate
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        threshold: 99
        interval: 1m
      - name: "latency"
        threshold: 0.5
        interval: 1m
        query: |
          histogram_quantile(0.99,
            sum(
              rate(
                http_request_duration_seconds_bucket{
                  kubernetes_namespace="test",
                  kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
                }[1m]
              )
            ) by (le)
          )
    # external checks (optional)
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
          logCmdOutput: "true"
```
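The analysis above drives a weighted traffic shift (stepWeight up to maxWeight). Per the 0.13.0 changelog entry, the NGINX integration also supports A/B testing through header and cookie filters; a minimal sketch of that analysis style, with field names taken from the Flagger A/B testing docs rather than from this commit range:

```yaml
canaryAnalysis:
  interval: 1m
  threshold: 10
  # run a fixed number of checks instead of shifting weight
  iterations: 10
  # only requests matching this header reach the canary
  match:
    - headers:
        x-canary:
          exact: "insider"
```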
artifacts/nginx/deployment.yaml (new file, 69 lines)

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
        - name: podinfod
          image: quay.io/stefanprodan/podinfo:1.4.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9898
              name: http
              protocol: TCP
          command:
            - ./podinfo
            - --port=9898
            - --level=info
            - --random-delay=false
            - --random-error=false
          env:
            - name: PODINFO_UI_COLOR
              value: green
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/healthz
            failureThreshold: 3
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 2
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/readyz
            failureThreshold: 3
            periodSeconds: 3
            successThreshold: 1
            timeoutSeconds: 2
          resources:
            limits:
              cpu: 1000m
              memory: 256Mi
            requests:
              cpu: 100m
              memory: 16Mi
```
artifacts/nginx/hpa.yaml (new file, 19 lines)

```yaml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 2
  maxReplicas: 4
  metrics:
    - type: Resource
      resource:
        name: cpu
        # scale up if usage is above
        # 99% of the requested CPU (100m)
        targetAverageUtilization: 99
```
artifacts/nginx/ingress.yaml (new file, 17 lines)

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - backend:
              serviceName: podinfo
              servicePort: 9898
```
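Flagger treats this ingress as a template: during analysis it maintains a second ingress carrying the NGINX canary annotations and raises the weight in stepWeight increments. A sketch of the generated object (the -canary naming and annotation values are assumptions, not shown in this diff):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: podinfo-canary
  namespace: test
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/canary: "true"
    # Flagger adjusts this from stepWeight (5) up to maxWeight (50)
    nginx.ingress.kubernetes.io/canary-weight: "5"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - backend:
              serviceName: podinfo-canary
              servicePort: 9898
```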
```diff
@@ -1,10 +1,10 @@
 apiVersion: v1
 name: flagger
-version: 0.11.1
-appVersion: 0.11.1
+version: 0.13.2
+appVersion: 0.13.2
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
-description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
+description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
 home: https://docs.flagger.app
 icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
 sources:
```
```diff
@@ -45,7 +45,7 @@ The following tables lists the configurable parameters of the Flagger chart and
 
 Parameter | Description | Default
 --- | --- | ---
-`image.repository` | image repository | `quay.io/stefanprodan/flagger`
+`image.repository` | image repository | `weaveworks/flagger`
 `image.tag` | image tag | `<VERSION>`
 `image.pullPolicy` | image pull policy | `IfNotPresent`
 `metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
```
```diff
@@ -70,6 +70,18 @@ spec:
               type: string
             name:
               type: string
+          ingressRef:
+            anyOf:
+              - type: string
+              - type: object
+            required: ['apiVersion', 'kind', 'name']
+            properties:
+              apiVersion:
+                type: string
+              kind:
+                type: string
+              name:
+                type: string
           service:
             type: object
             required: ['port']
```
```diff
@@ -38,7 +38,11 @@ spec:
             {{- if .Values.meshProvider }}
             - -mesh-provider={{ .Values.meshProvider }}
             {{- end }}
+            {{- if .Values.prometheus.install }}
+            - -metrics-server=http://{{ template "flagger.fullname" . }}-prometheus:9090
+            {{- else }}
             - -metrics-server={{ .Values.metricsServer }}
+            {{- end }}
             {{- if .Values.namespace }}
             - -namespace={{ .Values.namespace }}
             {{- end }}
```
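The template keys above map to chart values like the following sketch, assembled from this template and the chart README; only the metricsServer default is documented above, the others are assumptions:

```yaml
# values.yaml (sketch)
meshProvider: nginx
metricsServer: http://prometheus.istio-system:9090
prometheus:
  # when true, the chart ships its own Prometheus and points
  # -metrics-server at it instead of .Values.metricsServer
  install: true
# restrict the operator to a single namespace (optional)
namespace: ""
```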
292
charts/flagger/templates/prometheus.yaml
Normal file
292
charts/flagger/templates/prometheus.yaml
Normal file
@@ -0,0 +1,292 @@
|
||||
{{- if .Values.prometheus.install }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
---
|
||||
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "flagger.fullname" . }}-prometheus
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
data:
  prometheus.yml: |-
    global:
      scrape_interval: 5s
    scrape_configs:

      # Scrape config for AppMesh Envoy sidecar
      - job_name: 'appmesh-envoy'
        metrics_path: /stats/prometheus
        kubernetes_sd_configs:
          - role: pod

        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_container_name]
            action: keep
            regex: '^envoy$'
          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
            action: replace
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: ${1}:9901
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: kubernetes_namespace
          - source_labels: [__meta_kubernetes_pod_name]
            action: replace
            target_label: kubernetes_pod_name

        # Exclude high cardinality metrics
        metric_relabel_configs:
          - source_labels: [ cluster_name ]
            regex: '(outbound|inbound|prometheus_stats).*'
            action: drop
          - source_labels: [ tcp_prefix ]
            regex: '(outbound|inbound|prometheus_stats).*'
            action: drop
          - source_labels: [ listener_address ]
            regex: '(.+)'
            action: drop
          - source_labels: [ http_conn_manager_listener_prefix ]
            regex: '(.+)'
            action: drop
          - source_labels: [ http_conn_manager_prefix ]
            regex: '(.+)'
            action: drop
          - source_labels: [ __name__ ]
            regex: 'envoy_tls.*'
            action: drop
          - source_labels: [ __name__ ]
            regex: 'envoy_tcp_downstream.*'
            action: drop
          - source_labels: [ __name__ ]
            regex: 'envoy_http_(stats|admin).*'
            action: drop
          - source_labels: [ __name__ ]
            regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
            action: drop

      # Scrape config for API servers
      - job_name: 'kubernetes-apiservers'
        kubernetes_sd_configs:
          - role: endpoints
            namespaces:
              names:
                - default
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        relabel_configs:
          - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
            action: keep
            regex: kubernetes;https

      # Scrape config for nodes
      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/${1}/proxy/metrics

      # scrape config for cAdvisor
      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

      # scrape config for pods
      - job_name: kubernetes-pods
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - action: keep
            regex: true
            source_labels:
              - __meta_kubernetes_pod_annotation_prometheus_io_scrape
          - source_labels: [ __address__ ]
            regex: '.*9901.*'
            action: drop
          - action: replace
            regex: (.+)
            source_labels:
              - __meta_kubernetes_pod_annotation_prometheus_io_path
            target_label: __metrics_path__
          - action: replace
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
            source_labels:
              - __address__
              - __meta_kubernetes_pod_annotation_prometheus_io_port
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: kubernetes_namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: kubernetes_pod_name
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "flagger.fullname" . }}-prometheus
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
        app.kubernetes.io/instance: {{ .Release.Name }}
      annotations:
        appmesh.k8s.aws/sidecarInjectorWebhook: disabled
        sidecar.istio.io/inject: "false"
    spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
      containers:
        - name: prometheus
          image: "docker.io/prom/prometheus:v2.7.1"
          imagePullPolicy: IfNotPresent
          args:
            - '--storage.tsdb.retention=6h'
            - '--config.file=/etc/prometheus/prometheus.yml'
          ports:
            - containerPort: 9090
              name: http
          livenessProbe:
            httpGet:
              path: /-/healthy
              port: 9090
          readinessProbe:
            httpGet:
              path: /-/ready
              port: 9090
          resources:
            requests:
              cpu: 10m
              memory: 128Mi
          volumeMounts:
            - name: config-volume
              mountPath: /etc/prometheus
            - name: data-volume
              mountPath: /prometheus/data

      volumes:
        - name: config-volume
          configMap:
            name: {{ template "flagger.fullname" . }}-prometheus
        - name: data-volume
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ template "flagger.fullname" . }}-prometheus
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  selector:
    app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
    app.kubernetes.io/instance: {{ .Release.Name }}
  ports:
    - name: http
      protocol: TCP
      port: 9090
{{- end }}
@@ -27,6 +27,12 @@ rules:
    resources:
      - horizontalpodautoscalers
    verbs: ["*"]
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
      - ingresses/status
    verbs: ["*"]
  - apiGroups:
      - flagger.app
    resources:
@@ -2,12 +2,12 @@

image:
  repository: weaveworks/flagger
  tag: 0.11.1
  tag: 0.13.2
  pullPolicy: IfNotPresent

metricsServer: "http://prometheus:9090"

# accepted values are istio or appmesh (defaults to istio)
# accepted values are istio, appmesh, nginx or supergloo:mesh.namespace (defaults to istio)
meshProvider: ""

# single namespace restriction
@@ -49,3 +49,7 @@ nodeSelector: {}
tolerations: []

affinity: {}

prometheus:
  # to be used with AppMesh or nginx ingress
  install: false
@@ -1,6 +1,6 @@
apiVersion: v1
name: grafana
version: 1.1.0
version: 1.2.0
appVersion: 5.4.3
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
@@ -1614,9 +1614,9 @@
        "multi": false,
        "name": "primary",
        "options": [],
        "query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
        "query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_workload))",
        "refresh": 1,
        "regex": "/.*destination_service_name=\"([^\"]*).*/",
        "regex": "/.*destination_workload=\"([^\"]*).*/",
        "skipUrlSync": false,
        "sort": 1,
        "tagValuesQuery": "",
@@ -1636,9 +1636,9 @@
        "multi": false,
        "name": "canary",
        "options": [],
        "query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
        "query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_workload))",
        "refresh": 1,
        "regex": "/.*destination_service_name=\"([^\"]*).*/",
        "regex": "/.*destination_workload=\"([^\"]*).*/",
        "skipUrlSync": false,
        "sort": 1,
        "tagValuesQuery": "",
@@ -1,6 +1,6 @@
apiVersion: v1
name: loadtester
version: 0.3.0
version: 0.4.0
appVersion: 0.3.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
@@ -1,7 +1,7 @@
replicaCount: 1

image:
  repository: quay.io/weaveworks/flagger-loadtester
  repository: weaveworks/flagger-loadtester
  tag: 0.3.0
  pullPolicy: IfNotPresent
@@ -2,13 +2,18 @@ package main

import (
    "flag"
    _ "github.com/istio/glog"
    "log"
    "strings"
    "time"

    _ "github.com/stefanprodan/klog"
    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
    informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
    "github.com/weaveworks/flagger/pkg/controller"
    "github.com/weaveworks/flagger/pkg/logger"
    "github.com/weaveworks/flagger/pkg/metrics"
    "github.com/weaveworks/flagger/pkg/notifier"
    "github.com/weaveworks/flagger/pkg/router"
    "github.com/weaveworks/flagger/pkg/server"
    "github.com/weaveworks/flagger/pkg/signals"
    "github.com/weaveworks/flagger/pkg/version"
@@ -17,9 +22,6 @@ import (
    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
    "log"
    "strings"
    "time"
)

var (
@@ -97,7 +99,7 @@ func main() {

    canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

    logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
    logger.Infof("Starting flagger version %s revision %s mesh provider %s", version.VERSION, version.REVISION, meshProvider)

    ver, err := kubeClient.Discovery().ServerVersion()
    if err != nil {
@@ -134,6 +136,8 @@ func main() {
    // start HTTP server
    go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

    routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, logger, meshClient)

    c := controller.NewController(
        kubeClient,
        meshClient,
@@ -143,6 +147,7 @@ func main() {
        metricsServer,
        logger,
        slack,
        routerFactory,
        meshProvider,
        version.VERSION,
        labels,
BIN docs/diagrams/flagger-gitops-istio.png (new file, 35 KiB, binary file not shown)
BIN docs/diagrams/flagger-nginx-overview.png (new file, 40 KiB, binary file not shown)
@@ -5,7 +5,7 @@ description: Flagger is a progressive delivery Kubernetes operator
# Introduction

[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** or **App Mesh** routing for traffic shifting and **Prometheus** metrics for canary analysis.
deployments using **Istio**, **App Mesh** or **NGINX** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running
system integration/acceptance tests, load tests, or any other custom validation.
@@ -8,12 +8,14 @@
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
* [Flagger Install on GKE Istio](install/flagger-install-on-google-cloud.md)
* [Flagger Install on EKS App Mesh](install/flagger-install-on-eks-appmesh.md)
* [Flagger Install with SuperGloo](install/flagger-install-with-supergloo.md)

## Usage

* [Istio Canary Deployments](usage/progressive-delivery.md)
* [Istio A/B Testing](usage/ab-testing.md)
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)
@@ -689,7 +689,7 @@ webhooks:

When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo.test` virtual service will receive a steady steam of GET and POST requests.
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.

If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
public URL and use HTTP2.
@@ -125,19 +125,6 @@ Status:
  Type:    MeshActive
```

### Install Prometheus

In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to setup a Prometheus instance to scrape the Envoy sidecars.

Deploy Prometheus in the `appmesh-system` namespace:

```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/eks/appmesh-prometheus.yaml
```

### Install Flagger and Grafana

Add Flagger Helm repository:
@@ -146,16 +133,17 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```

Deploy Flagger in the _**appmesh-system**_ namespace:
Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=appmesh-system \
  --set meshProvider=appmesh \
  --set metricsServer=http://prometheus.appmesh-system:9090
  --set prometheus.install=true
```

You can install Flagger in any namespace as long as it can talk to the Istio Prometheus service on port 9090.
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to setup a Prometheus instance to scrape the Envoy sidecars.
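
To confirm that the bundled Prometheus actually discovers the Envoy sidecars, you can query its targets API through a port-forward. A minimal sketch, assuming the default Helm release name `flagger` (which makes the service `flagger-prometheus` in `appmesh-system`):

```bash
# assumes the release is named "flagger", so the service is flagger-prometheus
kubectl -n appmesh-system port-forward svc/flagger-prometheus 9090:9090 &

# list the active scrape jobs and look for "appmesh-envoy"
curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets[].labels.job'
```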

You can enable **Slack** notifications with:
@@ -52,7 +52,8 @@ If you don't have Tiller you can use the helm template command and apply the gen

```bash
# generate
helm template flagger/flagger \
helm fetch --untar --untardir . flagger/flagger &&
helm template flagger \
  --name flagger \
  --namespace=istio-system \
  --set metricsServer=http://prometheus.istio-system:9090 \
@@ -98,12 +99,10 @@ Or use helm template command and apply the generated yaml with kubectl:

```bash
# generate
helm template flagger/grafana \
helm fetch --untar --untardir . flagger/grafana &&
helm template grafana \
  --name flagger-grafana \
  --namespace=istio-system \
  --set url=http://prometheus.istio-system:9090 \
  --set user=admin \
  --set password=change-me \
  > $HOME/flagger-grafana.yaml

# apply
@@ -132,10 +131,14 @@ helm upgrade -i flagger-loadtester flagger/loadtester \

Deploy with kubectl:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
  --name flagger-loadtester \
  --namespace=test \
  > $HOME/flagger-loadtester.yaml

kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```

> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.
docs/gitbook/install/flagger-install-with-supergloo.md (new file, 184 lines)
@@ -0,0 +1,184 @@
# Flagger install on Kubernetes with SuperGloo

This guide walks you through setting up Flagger on a Kubernetes cluster using [SuperGloo](https://github.com/solo-io/supergloo).

SuperGloo by [Solo.io](https://solo.io) is an opinionated abstraction layer that simplifies the installation, management, and operation of your service mesh.
It supports running multiple ingresses with multiple meshes (Istio, App Mesh, Consul Connect and Linkerd 2) in the same cluster.

### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:

* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook

### Install Istio with SuperGloo

#### Install SuperGloo command line interface helper

SuperGloo includes a command line helper (CLI) that makes operation of SuperGloo easier.
The CLI is not required for SuperGloo to function correctly.

If you use the [Homebrew](https://brew.sh) package manager, run the following
commands to install the SuperGloo CLI:

```bash
brew tap solo-io/tap
brew install solo-io/tap/supergloo
```

Or you can download the SuperGloo CLI and add it to your path:

```bash
curl -sL https://run.solo.io/supergloo/install | sh
export PATH=$HOME/.supergloo/bin:$PATH
```

#### Install SuperGloo controller

Deploy the SuperGloo controller in the `supergloo-system` namespace:

```bash
supergloo init
```

This is equivalent to installing SuperGloo using its Helm chart:

```bash
helm repo add supergloo http://storage.googleapis.com/supergloo-helm
helm upgrade --install supergloo supergloo/supergloo --namespace supergloo-system
```

#### Install Istio using SuperGloo

Create the `istio-system` namespace and install Istio with traffic management, telemetry and Prometheus enabled:

```bash
ISTIO_VER="1.0.6"

kubectl create namespace istio-system

supergloo install istio --name istio \
  --namespace=supergloo-system \
  --auto-inject=true \
  --installation-namespace=istio-system \
  --mtls=false \
  --prometheus=true \
  --version=${ISTIO_VER}
```

This creates a Kubernetes custom resource like the following:

```yaml
apiVersion: supergloo.solo.io/v1
kind: Install
metadata:
  name: istio
  namespace: supergloo-system
spec:
  installationNamespace: istio-system
  mesh:
    installedMesh:
      name: istio
      namespace: supergloo-system
    istioMesh:
      enableAutoInject: true
      enableMtls: false
      installGrafana: false
      installJaeger: false
      installPrometheus: true
      istioVersion: 1.0.6
```
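
To check that SuperGloo accepted the install, you can inspect the resource it created. A sketch, assuming the `Install` kind is registered under the `supergloo.solo.io` API group as shown above:

```bash
# the resource plural "installs" is an assumption based on the kind above
kubectl -n supergloo-system get installs.supergloo.solo.io
kubectl -n supergloo-system get installs.supergloo.solo.io istio -o yaml
```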

#### Allow Flagger to manipulate SuperGloo

Create a cluster role binding so that Flagger can manipulate SuperGloo custom resources:

```bash
kubectl create clusterrolebinding flagger-supergloo \
  --clusterrole=mesh-discovery \
  --serviceaccount=istio-system:flagger
```

Wait for the Istio control plane to become available:

```bash
kubectl --namespace istio-system rollout status deployment/istio-sidecar-injector
kubectl --namespace istio-system rollout status deployment/prometheus
```

### Install Flagger

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Deploy Flagger in the _**istio-system**_ namespace and set the service mesh provider to SuperGloo:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set metricsServer=http://prometheus.istio-system:9090 \
  --set meshProvider=supergloo:istio.supergloo-system
```

When using SuperGloo the mesh provider format is `supergloo:<MESH-NAME>.<SUPERGLOO-NAMESPACE>`.
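
You can confirm that Flagger picked up the provider by checking its startup log line, which prints the configured mesh provider (the version and revision shown below are illustrative):

```bash
kubectl -n istio-system logs deployment/flagger | grep 'mesh provider'
# Starting flagger version 0.13.2 revision <sha> mesh provider supergloo:istio.supergloo-system
```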

Optionally you can enable **Slack** notifications:

```bash
helm upgrade -i flagger flagger/flagger \
  --reuse-values \
  --namespace=istio-system \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general \
  --set slack.user=flagger
```

### Install Grafana

Flagger comes with a Grafana dashboard made for monitoring the canary analysis.

Deploy Grafana in the _**istio-system**_ namespace:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=istio-system \
  --set url=http://prometheus.istio-system:9090
```

You can access Grafana using port forwarding:

```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```

### Install Load Tester

Flagger comes with an optional load testing service that generates traffic
during canary analysis when configured as a webhook.

Deploy the load test runner with Helm:

```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
  --namespace=test \
  --set cmd.timeout=1h
```

Deploy with kubectl:

```bash
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
  --name flagger-loadtester \
  --namespace=test \
  > $HOME/flagger-loadtester.yaml

# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```

> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.
docs/gitbook/usage/nginx-progressive-delivery.md (new file, 421 lines)
@@ -0,0 +1,421 @@
# NGINX Ingress Controller Canary Deployments

This guide shows you how to use the NGINX ingress controller and Flagger to automate canary deployments and A/B testing.

![Flagger NGINX Overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-nginx-overview.png)

### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer and NGINX ingress **0.24** or newer.
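
A quick way to verify both requirements before continuing; a sketch that assumes the controller will run in the `ingress-nginx` namespace used below:

```bash
# Kubernetes server version (should be v1.11+)
kubectl version --short

# NGINX ingress controller image tag (should be 0.24+)
kubectl -n ingress-nginx get pods \
  -o jsonpath='{.items[*].spec.containers[*].image}'
```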

Install NGINX with Helm:

```bash
helm upgrade -i nginx-ingress stable/nginx-ingress \
  --namespace ingress-nginx \
  --set controller.stats.enabled=true \
  --set controller.metrics.enabled=true \
  --set controller.podAnnotations."prometheus\.io/scrape"=true \
  --set controller.podAnnotations."prometheus\.io/port"=10254
```
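
Since the canary analysis is driven by the controller's Prometheus metrics, it's worth checking that the `/metrics` endpoint on port 10254 responds. A sketch; the `app=nginx-ingress` pod label is an assumption based on the stable chart defaults:

```bash
# pick the controller pod (label is an assumption from the stable chart)
POD=$(kubectl -n ingress-nginx get pods -l app=nginx-ingress \
  -o jsonpath='{.items[0].metadata.name}')

kubectl -n ingress-nginx port-forward ${POD} 10254:10254 &
curl -s http://localhost:10254/metrics | grep nginx_ingress_controller | head
```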

Install Flagger and the Prometheus add-on in the same namespace as NGINX:

```bash
helm repo add flagger https://flagger.app

helm upgrade -i flagger flagger/flagger \
  --namespace ingress-nginx \
  --set prometheus.install=true \
  --set meshProvider=nginx
```

Optionally you can enable Slack notifications:

```bash
helm upgrade -i flagger flagger/flagger \
  --reuse-values \
  --namespace ingress-nginx \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general \
  --set slack.user=flagger
```

### Bootstrap

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and a canary ingress).
These objects expose the application outside the cluster and drive the canary analysis and promotion.

Create a test namespace:

```bash
kubectl create ns test
```

Create a deployment and a horizontal pod autoscaler:

```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/nginx/deployment.yaml
kubectl apply -f ${REPO}/artifacts/nginx/hpa.yaml
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
  --namespace=test
```

Create an ingress definition (replace `app.example.com` with your own domain):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - backend:
              serviceName: podinfo
              servicePort: 9898
```

Save the above resource as podinfo-ingress.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-ingress.yaml
```

Create a canary custom resource (replace `app.example.com` with your own domain):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # ingress reference
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  service:
    # container port
    port: 9898
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # NGINX Prometheus checks
    metrics:
      - name: request-success-rate
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        threshold: 99
        interval: 1m
    # load testing (optional)
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
```

Save the above resource as podinfo-canary.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-canary.yaml
```

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingresses.extensions/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
ingresses.extensions/podinfo-canary
```
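
During an analysis you can inspect the generated `podinfo-canary` ingress. Flagger is expected to drive the traffic split through the NGINX canary annotations; a sketch of what to look for (the exact annotation set is an assumption):

```bash
kubectl -n test get ingress podinfo-canary \
  -o jsonpath='{.metadata.annotations}'

# expect annotations along these lines while a canary is progressing:
# nginx.ingress.kubernetes.io/canary: "true"
# nginx.ingress.kubernetes.io/canary-weight: "5"
```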

### Automated canary promotion

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.

![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.4.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 20
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 25
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 30
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 35
  Normal   Synced  55s   flagger  Advance podinfo.test canary weight 40
  Normal   Synced  45s   flagger  Advance podinfo.test canary weight 45
  Normal   Synced  35s   flagger  Advance podinfo.test canary weight 50
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   15       2019-05-06T14:05:07Z
prod        frontend  Succeeded     0        2019-05-05T16:15:07Z
prod        backend   Failed        0        2019-05-04T17:05:07Z
```

### Automated rollback

During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulted version.

Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.4.2
```

Generate HTTP 500 errors:

```bash
watch curl http://app.example.com/status/500
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  10
  Phase:          Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 55.06% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 47.00% < 99%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```

### Custom metrics

The canary analysis can be extended with Prometheus queries.

The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
histogram to validate the canary.

Edit the canary analysis and add the following metric:

```yaml
canaryAnalysis:
  metrics:
    - name: "latency"
      threshold: 0.5
      interval: 1m
      query: |
        histogram_quantile(0.99,
          sum(
            rate(
              http_request_duration_seconds_bucket{
                kubernetes_namespace="test",
                kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
              }[1m]
            )
          ) by (le)
        )
```

The threshold is set to 500ms, so if the 99th percentile of the request duration over the last minute
goes above half a second the analysis will fail and the canary will not be promoted.
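
Before relying on a custom metric, you can run the same query by hand against the Prometheus instance installed alongside Flagger. A sketch, assuming the `flagger-prometheus` service name that `prometheus.install=true` produces with the release name `flagger`:

```bash
kubectl -n ingress-nginx port-forward svc/flagger-prometheus 9090:9090 &

# a simplified form of the canary query, without the pod-name matcher
QUERY='histogram_quantile(0.99, sum(rate(http_request_duration_seconds_bucket{kubernetes_namespace="test"}[1m])) by (le))'

curl -sG http://localhost:9090/api/v1/query \
  --data-urlencode "query=${QUERY}" | jq '.data.result'
```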

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.4.3
```

Generate high response latency:

```bash
watch curl http://app.example.com/delay/2
```

Watch Flagger logs:

```text
kubectl -n ingress-nginx logs deployment/flagger -f | jq .msg

Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement latency 1.20 > 0.5
Halt podinfo.test advancement latency 1.45 > 0.5
Halt podinfo.test advancement latency 1.60 > 0.5
Halt podinfo.test advancement latency 1.69 > 0.5
Halt podinfo.test advancement latency 1.70 > 0.5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```

If you have Slack configured, Flagger will send a notification with the reason why the canary failed.

### A/B Testing

Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.

![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)

Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:

```yaml
canaryAnalysis:
  interval: 1m
  threshold: 10
  iterations: 10
  match:
    # curl -H 'X-Canary: insider' http://app.example.com
    - headers:
        x-canary:
          exact: "insider"
    # curl -b 'canary=always' http://app.example.com
    - headers:
        cookie:
          exact: "canary"
  metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
  webhooks:
    - name: load-test
      url: http://localhost:8888/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com/"
        logCmdOutput: "true"
```

The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
those that call the service using the `X-Canary: insider` header.
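
To verify the match conditions by hand, compare a plain request with one that carries the canary header or cookie. A sketch, assuming the podinfo demo app reports its version in the response body:

```bash
# served by the primary
curl -s http://app.example.com/ | grep version

# served by the canary (header match)
curl -s -H 'X-Canary: insider' http://app.example.com/ | grep version

# served by the canary (cookie match)
curl -s -b 'canary=always' http://app.example.com/ | grep version
```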

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.5.0
```

Flagger detects that the deployment revision changed and starts the A/B testing:

```text
kubectl -n test describe canary/podinfo

Status:
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 1/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 2/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 3/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 4/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 5/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 6/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 7/10
  Normal   Synced  55s   flagger  Advance podinfo.test canary iteration 8/10
  Normal   Synced  45s   flagger  Advance podinfo.test canary iteration 9/10
  Normal   Synced  35s   flagger  Advance podinfo.test canary iteration 10/10
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```
@@ -52,6 +52,10 @@ type CanarySpec struct {
    // +optional
    AutoscalerRef *hpav1.CrossVersionObjectReference `json:"autoscalerRef,omitempty"`

    // reference to NGINX ingress resource
    // +optional
    IngressRef *hpav1.CrossVersionObjectReference `json:"ingressRef,omitempty"`

    // virtual service spec
    Service CanaryService `json:"service"`
@@ -205,6 +205,11 @@ func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
        *out = new(v1.CrossVersionObjectReference)
        **out = **in
    }
    if in.IngressRef != nil {
        in, out := &in.IngressRef, &out.IngressRef
        *out = new(v1.CrossVersionObjectReference)
        **out = **in
    }
    in.Service.DeepCopyInto(&out.Service)
    in.CanaryAnalysis.DeepCopyInto(&out.CanaryAnalysis)
    if in.ProgressDeadlineSeconds != nil {
@@ -214,7 +214,6 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, error)
    primaryDep = &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name:      primaryName,
            Labels:    canaryDep.Labels,
            Namespace: cd.Namespace,
            OwnerReferences: []metav1.OwnerReference{
                *metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -26,15 +26,16 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)

func init() {
    v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
    AddToScheme(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
    appmeshv1beta1.AddToScheme,
    flaggerv1alpha3.AddToScheme,
    networkingv1alpha3.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -47,12 +48,13 @@ func init() {
//   )
//
//   kclientset, _ := kubernetes.NewForConfig(c)
//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    appmeshv1beta1.AddToScheme(scheme)
    flaggerv1alpha3.AddToScheme(scheme)
    networkingv1alpha3.AddToScheme(scheme)
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
    v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
    utilruntime.Must(AddToScheme(scheme))
}
@@ -26,15 +26,16 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)

func init() {
    v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
    AddToScheme(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
    appmeshv1beta1.AddToScheme,
    flaggerv1alpha3.AddToScheme,
    networkingv1alpha3.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -47,12 +48,13 @@ func init() {
//   )
//
//   kclientset, _ := kubernetes.NewForConfig(c)
//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    appmeshv1beta1.AddToScheme(scheme)
    flaggerv1alpha3.AddToScheme(scheme)
    networkingv1alpha3.AddToScheme(scheme)
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
    v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
    utilruntime.Must(AddToScheme(Scheme))
}
@@ -123,7 +123,7 @@ func (c *FakeMeshes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.
// Patch applies the patch and returns the patched mesh.
func (c *FakeMeshes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Mesh, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewRootPatchSubresourceAction(meshesResource, name, data, subresources...), &v1beta1.Mesh{})
        Invokes(testing.NewRootPatchSubresourceAction(meshesResource, name, pt, data, subresources...), &v1beta1.Mesh{})
    if obj == nil {
        return nil, err
    }
@@ -131,7 +131,7 @@ func (c *FakeVirtualNodes) DeleteCollection(options *v1.DeleteOptions, listOptio
// Patch applies the patch and returns the patched virtualNode.
func (c *FakeVirtualNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VirtualNode, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(virtualnodesResource, c.ns, name, data, subresources...), &v1beta1.VirtualNode{})
        Invokes(testing.NewPatchSubresourceAction(virtualnodesResource, c.ns, name, pt, data, subresources...), &v1beta1.VirtualNode{})

    if obj == nil {
        return nil, err
@@ -131,7 +131,7 @@ func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOp
// Patch applies the patch and returns the patched virtualService.
func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VirtualService, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, data, subresources...), &v1beta1.VirtualService{})
        Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, pt, data, subresources...), &v1beta1.VirtualService{})

    if obj == nil {
        return nil, err
@@ -19,6 +19,8 @@ limitations under the License.
package v1beta1

import (
    "time"

    v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
    scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -73,10 +75,15 @@ func (c *meshes) Get(name string, options v1.GetOptions) (result *v1beta1.Mesh,

// List takes label and field selectors, and returns the list of Meshes that match those selectors.
func (c *meshes) List(opts v1.ListOptions) (result *v1beta1.MeshList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1beta1.MeshList{}
    err = c.client.Get().
        Resource("meshes").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return
@@ -84,10 +91,15 @@ func (c *meshes) List(opts v1.ListOptions) (result *v1beta1.MeshList, err error)

// Watch returns a watch.Interface that watches the requested meshes.
func (c *meshes) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Resource("meshes").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}
@@ -141,9 +153,14 @@ func (c *meshes) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *meshes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Resource("meshes").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -19,6 +19,8 @@ limitations under the License.
package v1beta1

import (
    "time"

    v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
    scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,11 +78,16 @@ func (c *virtualNodes) Get(name string, options v1.GetOptions) (result *v1beta1.

// List takes label and field selectors, and returns the list of VirtualNodes that match those selectors.
func (c *virtualNodes) List(opts v1.ListOptions) (result *v1beta1.VirtualNodeList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1beta1.VirtualNodeList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("virtualnodes").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return
@@ -88,11 +95,16 @@ func (c *virtualNodes) List(opts v1.ListOptions) (result *v1beta1.VirtualNodeLis

// Watch returns a watch.Interface that watches the requested virtualNodes.
func (c *virtualNodes) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("virtualnodes").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}
@@ -150,10 +162,15 @@ func (c *virtualNodes) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *virtualNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("virtualnodes").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -19,6 +19,8 @@ limitations under the License.
package v1beta1

import (
    "time"

    v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
    scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,11 +78,16 @@ func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1bet

// List takes label and field selectors, and returns the list of VirtualServices that match those selectors.
func (c *virtualServices) List(opts v1.ListOptions) (result *v1beta1.VirtualServiceList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1beta1.VirtualServiceList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("virtualservices").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return
@@ -88,11 +95,16 @@ func (c *virtualServices) List(opts v1.ListOptions) (result *v1beta1.VirtualServ

// Watch returns a watch.Interface that watches the requested virtualServices.
func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("virtualservices").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}
@@ -150,10 +162,15 @@ func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("virtualservices").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -19,6 +19,8 @@ limitations under the License.
package v1alpha3

import (
    "time"

    v1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,11 +78,16 @@ func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha3.Can

// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha3.CanaryList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("canaries").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return
@@ -88,11 +95,16 @@ func (c *canaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err e

// Watch returns a watch.Interface that watches the requested canaries.
func (c *canaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("canaries").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}
@@ -150,10 +162,15 @@ func (c *canaries) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *canaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("canaries").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -131,7 +131,7 @@ func (c *FakeCanaries) DeleteCollection(options *v1.DeleteOptions, listOptions v
// Patch applies the patch and returns the patched canary.
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha3.Canary{})
        Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, pt, data, subresources...), &v1alpha3.Canary{})

    if obj == nil {
        return nil, err
@@ -119,7 +119,7 @@ func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOp
// Patch applies the patch and returns the patched virtualService.
func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, data, subresources...), &v1alpha3.VirtualService{})
        Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, pt, data, subresources...), &v1alpha3.VirtualService{})

    if obj == nil {
        return nil, err
@@ -19,6 +19,8 @@ limitations under the License.
package v1alpha3

import (
    "time"

    v1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
    scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -75,11 +77,16 @@ func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1alp

// List takes label and field selectors, and returns the list of VirtualServices that match those selectors.
func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha3.VirtualServiceList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("virtualservices").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return
@@ -87,11 +94,16 @@ func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualSer

// Watch returns a watch.Interface that watches the requested virtualServices.
func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("virtualservices").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}

@@ -133,10 +145,15 @@ func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("virtualservices").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -27,6 +27,7 @@ import (
    cache "k8s.io/client-go/tools/cache"
)

// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@@ -35,4 +36,5 @@ type SharedInformerFactory interface {
    InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}

// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)
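Note on the generated-client changes above: List, Watch and DeleteCollection now translate `ListOptions.TimeoutSeconds` into a client-side request timeout. A minimal caller sketch, assuming a built clientset named `client`; the `FlaggerV1alpha3()` accessor name and the 30-second value are assumptions for illustration:

func listWithTimeout(client clientset.Interface) error {
    seconds := int64(30) // hypothetical bound; a nil TimeoutSeconds means no client-side timeout
    _, err := client.FlaggerV1alpha3().Canaries("test").List(metav1.ListOptions{
        TimeoutSeconds: &seconds, // becomes time.Duration(*opts.TimeoutSeconds) * time.Second inside List
    })
    return err
}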
@@ -2,11 +2,13 @@ package controller

import (
    "fmt"
    "github.com/weaveworks/flagger/pkg/canary"
    "github.com/weaveworks/flagger/pkg/metrics"
    "sync"
    "time"

    "github.com/weaveworks/flagger/pkg/canary"
    "github.com/weaveworks/flagger/pkg/metrics"
    "github.com/weaveworks/flagger/pkg/router"

    "github.com/google/go-cmp/cmp"
    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
@@ -46,6 +48,7 @@ type Controller struct {
    observer      metrics.Observer
    recorder      metrics.Recorder
    notifier      *notifier.Slack
    routerFactory *router.Factory
    meshProvider  string
}

@@ -58,6 +61,7 @@ func NewController(
    metricServer string,
    logger *zap.SugaredLogger,
    notifier *notifier.Slack,
    routerFactory *router.Factory,
    meshProvider string,
    version string,
    labels []string,
@@ -103,6 +107,7 @@ func NewController(
    observer:      metrics.NewObserver(metricServer),
    recorder:      recorder,
    notifier:      notifier,
    routerFactory: routerFactory,
    meshProvider:  meshProvider,
}
@@ -1,6 +1,9 @@
package controller

import (
    "sync"
    "time"

    "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    istiov1alpha1 "github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
    istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
@@ -21,8 +24,6 @@ import (
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/workqueue"
    "sync"
    "time"
)

var (
@@ -82,6 +83,9 @@ func SetupMocks(abtest bool) Mocks {
    flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
    flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

    // init router
    rf := router.NewFactory(nil, kubeClient, flaggerClient, logger, flaggerClient)

    ctrl := &Controller{
        kubeClient:  kubeClient,
        istioClient: flaggerClient,
@@ -96,11 +100,10 @@ func SetupMocks(abtest bool) Mocks {
        deployer:      deployer,
        observer:      observer,
        recorder:      metrics.NewRecorder(controllerAgentName, false),
        routerFactory: rf,
    }
    ctrl.flaggerSynced = alwaysReady

    // init router
    rf := router.NewFactory(kubeClient, flaggerClient, logger, flaggerClient)
    meshRouter := rf.MeshRouter("istio")

    return Mocks{
@@ -2,12 +2,13 @@ package controller

import (
    "fmt"
    "github.com/weaveworks/flagger/pkg/router"
    "strings"
    "time"

    "github.com/weaveworks/flagger/pkg/router"

    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    "k8s.io/apimachinery/pkg/apis/meta/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scheduleCanaries synchronises the canary map with the jobs map,
@@ -97,11 +98,10 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
    }

    // init routers
    routerFactory := router.NewFactory(c.kubeClient, c.flaggerClient, c.logger, c.istioClient)
    meshRouter := routerFactory.MeshRouter(c.meshProvider)
    meshRouter := c.routerFactory.MeshRouter(c.meshProvider)

    // create or update ClusterIP services
    if err := routerFactory.KubernetesRouter(label).Reconcile(cd); err != nil {
    if err := c.routerFactory.KubernetesRouter(label).Reconcile(cd); err != nil {
        c.recordEventWarningf(cd, "%v", err)
        return
    }
@@ -632,6 +632,41 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
        }
    }

    // NGINX checks
    if c.meshProvider == "nginx" {
        if metric.Name == "request-success-rate" {
            val, err := c.observer.GetNginxSuccessRate(r.Spec.IngressRef.Name, r.Namespace, metric.Name, metric.Interval)
            if err != nil {
                if strings.Contains(err.Error(), "no values found") {
                    c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
                        metric.Name, r.Spec.TargetRef.Name, r.Namespace)
                } else {
                    c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
                }
                return false
            }
            if float64(metric.Threshold) > val {
                c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
                    r.Name, r.Namespace, val, metric.Threshold)
                return false
            }
        }

        if metric.Name == "request-duration" {
            val, err := c.observer.GetNginxRequestDuration(r.Spec.IngressRef.Name, r.Namespace, metric.Name, metric.Interval)
            if err != nil {
                c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
                return false
            }
            t := time.Duration(metric.Threshold) * time.Millisecond
            if val > t {
                c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
                    r.Name, r.Namespace, val, t)
                return false
            }
        }
    }

    // custom checks
    if metric.Query != "" {
        val, err := c.observer.GetScalar(metric.Query)
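To make the new NGINX halting conditions concrete, a small worked example; the measured values are illustrative, the conversions mirror the code above:

// request-success-rate with threshold 99: a measured 98.20 fails,
// since float64(99) > 98.20, and the rollout halts.
// request-duration with threshold 500 is compared in milliseconds:
func durationCheckExample() {
    t := time.Duration(500) * time.Millisecond // metric.Threshold = 500 -> 500ms
    val := 600 * time.Millisecond              // measured upstream latency (illustrative)
    fmt.Println(val > t)                       // true -> rollout halted
}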
pkg/metrics/nginx.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package metrics

import (
    "fmt"
    "net/url"
    "strconv"
    "time"
)

const nginxSuccessRateQuery = `
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}",
status!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}
[{{ .Interval }}]))
* 100
`

// GetNginxSuccessRate returns the request success rate (non 5xx) using the nginx_ingress_controller_requests metric
func (c *Observer) GetNginxSuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
    if c.metricsServer == "fake" {
        return 100, nil
    }

    meta := struct {
        Name      string
        Namespace string
        Interval  string
    }{
        name,
        namespace,
        interval,
    }

    query, err := render(meta, nginxSuccessRateQuery)
    if err != nil {
        return 0, err
    }

    var rate *float64
    querySt := url.QueryEscape(query)
    result, err := c.queryMetric(querySt)
    if err != nil {
        return 0, err
    }

    for _, v := range result.Data.Result {
        metricValue := v.Value[1]
        switch metricValue.(type) {
        case string:
            f, err := strconv.ParseFloat(metricValue.(string), 64)
            if err != nil {
                return 0, err
            }
            rate = &f
        }
    }
    if rate == nil {
        return 0, fmt.Errorf("no values found for metric %s", metric)
    }
    return *rate, nil
}

const nginxRequestDurationQuery = `
sum(rate(
nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}[{{ .Interval }}]))
/
sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}[{{ .Interval }}])) * 1000
`

// GetNginxRequestDuration returns the average request latency using the nginx_ingress_controller_ingress_upstream_latency_seconds metrics
func (c *Observer) GetNginxRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
    if c.metricsServer == "fake" {
        return 1, nil
    }

    meta := struct {
        Name      string
        Namespace string
        Interval  string
    }{
        name,
        namespace,
        interval,
    }

    query, err := render(meta, nginxRequestDurationQuery)
    if err != nil {
        return 0, err
    }

    var rate *float64
    querySt := url.QueryEscape(query)
    result, err := c.queryMetric(querySt)
    if err != nil {
        return 0, err
    }

    for _, v := range result.Data.Result {
        metricValue := v.Value[1]
        switch metricValue.(type) {
        case string:
            f, err := strconv.ParseFloat(metricValue.(string), 64)
            if err != nil {
                return 0, err
            }
            rate = &f
        }
    }
    if rate == nil {
        return 0, fmt.Errorf("no values found for metric %s", metric)
    }
    ms := time.Duration(int64(*rate)) * time.Millisecond
    return ms, nil
}
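A worked example of the final conversion in GetNginxRequestDuration: the query already multiplies by 1000, so the scalar arrives in milliseconds and the fractional part is truncated:

rate := 250.7 // Prometheus scalar; the query multiplied by 1000, so this is already ms
ms := time.Duration(int64(rate)) * time.Millisecond
fmt.Println(ms) // 250ms - the fractional part is dropped by the int64 conversion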
pkg/metrics/nginx_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package metrics

import (
    "testing"
)

func Test_NginxSuccessRateQueryRender(t *testing.T) {
    meta := struct {
        Name      string
        Namespace string
        Interval  string
    }{
        "podinfo",
        "nginx",
        "1m",
    }

    query, err := render(meta, nginxSuccessRateQuery)
    if err != nil {
        t.Fatal(err)
    }

    expected := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo",status!~"5.*"}[1m])) / sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m])) * 100`

    if query != expected {
        t.Errorf("\nGot %s \nWanted %s", query, expected)
    }
}

func Test_NginxRequestDurationQueryRender(t *testing.T) {
    meta := struct {
        Name      string
        Namespace string
        Interval  string
    }{
        "podinfo",
        "nginx",
        "1m",
    }

    query, err := render(meta, nginxRequestDurationQuery)
    if err != nil {
        t.Fatal(err)
    }

    expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="nginx",ingress="podinfo"}[1m])) /sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="nginx",ingress="podinfo"}[1m])) * 1000`

    if query != expected {
        t.Errorf("\nGot %s \nWanted %s", query, expected)
    }
}
@@ -99,7 +99,9 @@ func (c *Observer) GetScalar(query string) (float64, error) {
    query = strings.Replace(query, " ", "", -1)

    var value *float64
    result, err := c.queryMetric(query)

    querySt := url.QueryEscape(query)
    result, err := c.queryMetric(querySt)
    if err != nil {
        return 0, err
    }
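The GetScalar fix mirrors the new NGINX observers: the PromQL expression is URL-escaped before being appended to the HTTP query string, so metacharacters such as `{`, `"` and `!~` survive the round trip. A quick standard-library illustration:

q := `sum(rate(nginx_ingress_controller_requests{status!~"5.*"}[1m]))`
fmt.Println(url.QueryEscape(q)) // %28, %7B, %22 and friends - safe to embed in a query string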
@@ -1,23 +1,29 @@
package router

import (
    "context"
    "strings"

    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
    "go.uber.org/zap"
    "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
)

type Factory struct {
    kubeConfig    *restclient.Config
    kubeClient    kubernetes.Interface
    meshClient    clientset.Interface
    flaggerClient clientset.Interface
    logger        *zap.SugaredLogger
}

func NewFactory(kubeClient kubernetes.Interface,
func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
    flaggerClient clientset.Interface,
    logger *zap.SugaredLogger,
    meshClient clientset.Interface) *Factory {
    return &Factory{
        kubeConfig:    kubeConfig,
        meshClient:    meshClient,
        kubeClient:    kubeClient,
        flaggerClient: flaggerClient,
@@ -35,20 +41,33 @@ func (factory *Factory) KubernetesRouter(label string) *KubernetesRouter {
    }
}

// MeshRouter returns a service mesh router (Istio or AppMesh)
// MeshRouter returns a service mesh router
func (factory *Factory) MeshRouter(provider string) Interface {
    if provider == "appmesh" {
    switch {
    case provider == "nginx":
        return &IngressRouter{
            logger:     factory.logger,
            kubeClient: factory.kubeClient,
        }
    case provider == "appmesh":
        return &AppMeshRouter{
            logger:        factory.logger,
            flaggerClient: factory.flaggerClient,
            kubeClient:    factory.kubeClient,
            appmeshClient: factory.meshClient,
        }
    }
    return &IstioRouter{
        logger:        factory.logger,
        flaggerClient: factory.flaggerClient,
        kubeClient:    factory.kubeClient,
        istioClient:   factory.meshClient,
    case strings.HasPrefix(provider, "supergloo"):
        supergloo, err := NewSuperglooRouter(context.TODO(), provider, factory.flaggerClient, factory.logger, factory.kubeConfig)
        if err != nil {
            panic("failed creating supergloo client")
        }
        return supergloo
    default:
        return &IstioRouter{
            logger:        factory.logger,
            flaggerClient: factory.flaggerClient,
            kubeClient:    factory.kubeClient,
            istioClient:   factory.meshClient,
        }
    }
}
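A short sketch of how the provider string drives the dispatch above; the constructor arguments mirror the new NewFactory signature, and the mesh reference format (name.namespace) matches the parsing in NewSuperglooRouter further down. The parameter values are placeholders:

func buildRouters(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
    flaggerClient clientset.Interface, logger *zap.SugaredLogger, meshClient clientset.Interface) {
    rf := router.NewFactory(kubeConfig, kubeClient, flaggerClient, logger, meshClient)

    _ = rf.MeshRouter("nginx")                            // IngressRouter (ingress annotations)
    _ = rf.MeshRouter("appmesh")                          // AppMeshRouter
    _ = rf.MeshRouter("supergloo:mesh.supergloo-system")  // SuperglooRouter (prefix match)
    _ = rf.MeshRouter("istio")                            // IstioRouter (default branch)
}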
pkg/router/ingress.go (new file, 231 lines)
@@ -0,0 +1,231 @@
package router

import (
    "fmt"
    "github.com/google/go-cmp/cmp"
    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    "go.uber.org/zap"
    "k8s.io/api/extensions/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes"
    "strconv"
    "strings"
)

type IngressRouter struct {
    kubeClient kubernetes.Interface
    logger     *zap.SugaredLogger
}

func (i *IngressRouter) Reconcile(canary *flaggerv1.Canary) error {
    if canary.Spec.IngressRef == nil || canary.Spec.IngressRef.Name == "" {
        return fmt.Errorf("ingress selector is empty")
    }

    targetName := canary.Spec.TargetRef.Name
    canaryName := fmt.Sprintf("%s-canary", targetName)
    canaryIngressName := fmt.Sprintf("%s-canary", canary.Spec.IngressRef.Name)

    ingress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canary.Spec.IngressRef.Name, metav1.GetOptions{})
    if err != nil {
        return err
    }

    ingressClone := ingress.DeepCopy()

    // change backend to <deployment-name>-canary
    backendExists := false
    for k, v := range ingressClone.Spec.Rules {
        for x, y := range v.HTTP.Paths {
            if y.Backend.ServiceName == targetName {
                ingressClone.Spec.Rules[k].HTTP.Paths[x].Backend.ServiceName = canaryName
                backendExists = true
                break
            }
        }
    }

    if !backendExists {
        return fmt.Errorf("backend %s not found in ingress %s", targetName, canary.Spec.IngressRef.Name)
    }

    canaryIngress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canaryIngressName, metav1.GetOptions{})

    if errors.IsNotFound(err) {
        ing := &v1beta1.Ingress{
            ObjectMeta: metav1.ObjectMeta{
                Name:      canaryIngressName,
                Namespace: canary.Namespace,
                OwnerReferences: []metav1.OwnerReference{
                    *metav1.NewControllerRef(canary, schema.GroupVersionKind{
                        Group:   flaggerv1.SchemeGroupVersion.Group,
                        Version: flaggerv1.SchemeGroupVersion.Version,
                        Kind:    flaggerv1.CanaryKind,
                    }),
                },
                Annotations: i.makeAnnotations(ingressClone.Annotations),
                Labels:      ingressClone.Labels,
            },
            Spec: ingressClone.Spec,
        }

        _, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Create(ing)
        if err != nil {
            return err
        }

        i.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
            Infof("Ingress %s.%s created", ing.GetName(), canary.Namespace)
        return nil
    }

    if err != nil {
        return fmt.Errorf("ingress %s query error %v", canaryIngressName, err)
    }

    if diff := cmp.Diff(ingressClone.Spec, canaryIngress.Spec); diff != "" {
        iClone := canaryIngress.DeepCopy()
        iClone.Spec = ingressClone.Spec

        _, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Update(iClone)
        if err != nil {
            return fmt.Errorf("ingress %s update error %v", canaryIngressName, err)
        }

        i.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
            Infof("Ingress %s updated", canaryIngressName)
    }

    return nil
}

func (i *IngressRouter) GetRoutes(canary *flaggerv1.Canary) (
    primaryWeight int,
    canaryWeight int,
    err error,
) {
    canaryIngressName := fmt.Sprintf("%s-canary", canary.Spec.IngressRef.Name)
    canaryIngress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canaryIngressName, metav1.GetOptions{})
    if err != nil {
        return 0, 0, err
    }

    // A/B testing
    if len(canary.Spec.CanaryAnalysis.Match) > 0 {
        for k := range canaryIngress.Annotations {
            if k == "nginx.ingress.kubernetes.io/canary-by-cookie" || k == "nginx.ingress.kubernetes.io/canary-by-header" {
                return 0, 100, nil
            }
        }
    }

    // Canary
    for k, v := range canaryIngress.Annotations {
        if k == "nginx.ingress.kubernetes.io/canary-weight" {
            val, err := strconv.Atoi(v)
            if err != nil {
                return 0, 0, err
            }

            canaryWeight = val
            break
        }
    }

    primaryWeight = 100 - canaryWeight
    return
}

func (i *IngressRouter) SetRoutes(
    canary *flaggerv1.Canary,
    primaryWeight int,
    canaryWeight int,
) error {
    canaryIngressName := fmt.Sprintf("%s-canary", canary.Spec.IngressRef.Name)
    canaryIngress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canaryIngressName, metav1.GetOptions{})
    if err != nil {
        return err
    }

    iClone := canaryIngress.DeepCopy()

    // A/B testing
    if len(canary.Spec.CanaryAnalysis.Match) > 0 {
        cookie := ""
        header := ""
        headerValue := ""
        for _, m := range canary.Spec.CanaryAnalysis.Match {
            for k, v := range m.Headers {
                if k == "cookie" {
                    cookie = v.Exact
                } else {
                    header = k
                    headerValue = v.Exact
                }
            }
        }

        iClone.Annotations = i.makeHeaderAnnotations(iClone.Annotations, header, headerValue, cookie)
    } else {
        // canary
        iClone.Annotations["nginx.ingress.kubernetes.io/canary-weight"] = fmt.Sprintf("%v", canaryWeight)
    }

    // toggle canary
    if canaryWeight > 0 {
        iClone.Annotations["nginx.ingress.kubernetes.io/canary"] = "true"
    } else {
        iClone.Annotations = i.makeAnnotations(iClone.Annotations)
    }

    _, err = i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Update(iClone)
    if err != nil {
        return fmt.Errorf("ingress %s update error %v", canaryIngressName, err)
    }

    return nil
}

func (i *IngressRouter) makeAnnotations(annotations map[string]string) map[string]string {
    res := make(map[string]string)
    for k, v := range annotations {
        if !strings.Contains(k, "nginx.ingress.kubernetes.io/canary") &&
            !strings.Contains(k, "kubectl.kubernetes.io/last-applied-configuration") {
            res[k] = v
        }
    }

    res["nginx.ingress.kubernetes.io/canary"] = "false"
    res["nginx.ingress.kubernetes.io/canary-weight"] = "0"

    return res
}

func (i *IngressRouter) makeHeaderAnnotations(annotations map[string]string,
    header string, headerValue string, cookie string) map[string]string {
    res := make(map[string]string)
    for k, v := range annotations {
        if !strings.Contains(v, "nginx.ingress.kubernetes.io/canary") {
            res[k] = v
        }
    }

    res["nginx.ingress.kubernetes.io/canary"] = "true"
    res["nginx.ingress.kubernetes.io/canary-weight"] = "0"

    if cookie != "" {
        res["nginx.ingress.kubernetes.io/canary-by-cookie"] = cookie
    }

    if header != "" {
        res["nginx.ingress.kubernetes.io/canary-by-header"] = header
    }

    if headerValue != "" {
        res["nginx.ingress.kubernetes.io/canary-by-header-value"] = headerValue
    }

    return res
}
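To illustrate the annotation round trip above, a worked example; the weights are illustrative, the annotation keys are the ones set by SetRoutes:

// after SetRoutes(canary, 70, 30) the canary ingress carries:
//   nginx.ingress.kubernetes.io/canary: "true"
//   nginx.ingress.kubernetes.io/canary-weight: "30"
// GetRoutes then reads the weight back and derives the primary side:
canaryWeight := 30
primaryWeight := 100 - canaryWeight // 70
fmt.Println(primaryWeight, canaryWeight)
// promotion calls SetRoutes(canary, 100, 0): makeAnnotations resets
// canary to "false" and canary-weight to "0", as the tests below verify.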
pkg/router/ingress_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package router

import (
    "fmt"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "testing"
)

func TestIngressRouter_Reconcile(t *testing.T) {
    mocks := setupfakeClients()
    router := &IngressRouter{
        logger:     mocks.logger,
        kubeClient: mocks.kubeClient,
    }

    err := router.Reconcile(mocks.ingressCanary)
    if err != nil {
        t.Fatal(err.Error())
    }

    canaryAn := "nginx.ingress.kubernetes.io/canary"
    canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"

    canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
    inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
    if err != nil {
        t.Fatal(err.Error())
    }

    if _, ok := inCanary.Annotations[canaryAn]; !ok {
        t.Errorf("Canary annotation missing")
    }

    // test initialisation
    if inCanary.Annotations[canaryAn] != "false" {
        t.Errorf("Got canary annotation %v wanted false", inCanary.Annotations[canaryAn])
    }

    if inCanary.Annotations[canaryWeightAn] != "0" {
        t.Errorf("Got canary weight annotation %v wanted 0", inCanary.Annotations[canaryWeightAn])
    }
}

func TestIngressRouter_GetSetRoutes(t *testing.T) {
    mocks := setupfakeClients()
    router := &IngressRouter{
        logger:     mocks.logger,
        kubeClient: mocks.kubeClient,
    }

    err := router.Reconcile(mocks.ingressCanary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p, c, err := router.GetRoutes(mocks.ingressCanary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p = 50
    c = 50

    err = router.SetRoutes(mocks.ingressCanary, p, c)
    if err != nil {
        t.Fatal(err.Error())
    }

    canaryAn := "nginx.ingress.kubernetes.io/canary"
    canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"

    canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
    inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
    if err != nil {
        t.Fatal(err.Error())
    }

    if _, ok := inCanary.Annotations[canaryAn]; !ok {
        t.Errorf("Canary annotation missing")
    }

    // test rollout
    if inCanary.Annotations[canaryAn] != "true" {
        t.Errorf("Got canary annotation %v wanted true", inCanary.Annotations[canaryAn])
    }

    if inCanary.Annotations[canaryWeightAn] != "50" {
        t.Errorf("Got canary weight annotation %v wanted 50", inCanary.Annotations[canaryWeightAn])
    }

    p = 100
    c = 0

    err = router.SetRoutes(mocks.ingressCanary, p, c)
    if err != nil {
        t.Fatal(err.Error())
    }

    inCanary, err = router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
    if err != nil {
        t.Fatal(err.Error())
    }

    // test promotion
    if inCanary.Annotations[canaryAn] != "false" {
        t.Errorf("Got canary annotation %v wanted false", inCanary.Annotations[canaryAn])
    }

    if inCanary.Annotations[canaryWeightAn] != "0" {
        t.Errorf("Got canary weight annotation %v wanted 0", inCanary.Annotations[canaryWeightAn])
    }
}
@@ -32,7 +32,7 @@ func (ir *IstioRouter) Reconcile(canary *flaggerv1.Canary) error {
    hosts := canary.Spec.Service.Hosts
    var hasServiceHost bool
    for _, h := range hosts {
        if h == targetName {
        if h == targetName || h == "*" {
            hasServiceHost = true
            break
        }
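The one-line change above lets a wildcard host satisfy the service host check, so specs that expose the service on all hosts now pass validation. A tiny sketch of the new predicate, extracted for illustration (in the real code it is inlined in Reconcile):

func hasServiceHost(hosts []string, targetName string) bool {
    for _, h := range hosts {
        if h == targetName || h == "*" { // "*" is new: hosts: ["*"] now passes
            return true
        }
    }
    return false
}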
@@ -11,7 +11,9 @@ import (
    appsv1 "k8s.io/api/apps/v1"
    hpav1 "k8s.io/api/autoscaling/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/api/extensions/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
)
@@ -20,6 +22,7 @@ type fakeClients struct {
    canary        *v1alpha3.Canary
    abtest        *v1alpha3.Canary
    appmeshCanary *v1alpha3.Canary
    ingressCanary *v1alpha3.Canary
    kubeClient    kubernetes.Interface
    meshClient    clientset.Interface
    flaggerClient clientset.Interface
@@ -30,9 +33,10 @@ func setupfakeClients() fakeClients {
    canary := newMockCanary()
    abtest := newMockABTest()
    appmeshCanary := newMockCanaryAppMesh()
    flaggerClient := fakeFlagger.NewSimpleClientset(canary, abtest, appmeshCanary)
    ingressCanary := newMockCanaryIngress()
    flaggerClient := fakeFlagger.NewSimpleClientset(canary, abtest, appmeshCanary, ingressCanary)

    kubeClient := fake.NewSimpleClientset(newMockDeployment(), newMockABTestDeployment())
    kubeClient := fake.NewSimpleClientset(newMockDeployment(), newMockABTestDeployment(), newMockIngress())

    meshClient := fakeFlagger.NewSimpleClientset()
    logger, _ := logger.NewLogger("debug")
@@ -41,6 +45,7 @@ func setupfakeClients() fakeClients {
    canary:        canary,
    abtest:        abtest,
    appmeshCanary: appmeshCanary,
    ingressCanary: ingressCanary,
    kubeClient:    kubeClient,
    meshClient:    meshClient,
    flaggerClient: flaggerClient,
@@ -266,3 +271,73 @@ func newMockABTestDeployment() *appsv1.Deployment {

    return d
}

func newMockCanaryIngress() *v1alpha3.Canary {
    cd := &v1alpha3.Canary{
        TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "nginx",
        },
        Spec: v1alpha3.CanarySpec{
            TargetRef: hpav1.CrossVersionObjectReference{
                Name:       "podinfo",
                APIVersion: "apps/v1",
                Kind:       "Deployment",
            },
            IngressRef: &hpav1.CrossVersionObjectReference{
                Name:       "podinfo",
                APIVersion: "extensions/v1beta1",
                Kind:       "Ingress",
            },
            Service: v1alpha3.CanaryService{
                Port: 9898,
            },
            CanaryAnalysis: v1alpha3.CanaryAnalysis{
                Threshold:  10,
                StepWeight: 10,
                MaxWeight:  50,
                Metrics: []v1alpha3.CanaryMetric{
                    {
                        Name:      "request-success-rate",
                        Threshold: 99,
                        Interval:  "1m",
                    },
                },
            },
        },
    }
    return cd
}

func newMockIngress() *v1beta1.Ingress {
    return &v1beta1.Ingress{
        TypeMeta: metav1.TypeMeta{APIVersion: v1beta1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo",
            Annotations: map[string]string{
                "kubernetes.io/ingress.class": "nginx",
            },
        },
        Spec: v1beta1.IngressSpec{
            Rules: []v1beta1.IngressRule{
                {
                    Host: "app.example.com",
                    IngressRuleValue: v1beta1.IngressRuleValue{
                        HTTP: &v1beta1.HTTPIngressRuleValue{
                            Paths: []v1beta1.HTTPIngressPath{
                                {
                                    Path: "/",
                                    Backend: v1beta1.IngressBackend{
                                        ServiceName: "podinfo",
                                        ServicePort: intstr.FromInt(9898),
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
    }
}
pkg/router/supergloo.go (new file, 344 lines)
@@ -0,0 +1,344 @@
package router

import (
    "context"
    "fmt"
    "strings"
    "time"

    solokitclients "github.com/solo-io/solo-kit/pkg/api/v1/clients"
    "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory"
    "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube"
    crdv1 "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/solo.io/v1"
    solokitcore "github.com/solo-io/solo-kit/pkg/api/v1/resources/core"
    solokiterror "github.com/solo-io/solo-kit/pkg/errors"

    types "github.com/gogo/protobuf/types"
    gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
    supergloov1alpha3 "github.com/solo-io/supergloo/pkg/api/external/istio/networking/v1alpha3"
    supergloov1 "github.com/solo-io/supergloo/pkg/api/v1"
    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
    "go.uber.org/zap"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/rest"
)

// SuperglooRouter manages canary traffic through Supergloo routing rules
type SuperglooRouter struct {
    rrClient   supergloov1.RoutingRuleClient
    logger     *zap.SugaredLogger
    targetMesh solokitcore.ResourceRef
}

func NewSuperglooRouter(ctx context.Context, provider string, flaggerClient clientset.Interface, logger *zap.SugaredLogger, cfg *rest.Config) (*SuperglooRouter, error) {
    // TODO if cfg is nil use memory client instead?
    sharedCache := kube.NewKubeCache(ctx)
    routingRuleClient, err := supergloov1.NewRoutingRuleClient(&factory.KubeResourceClientFactory{
        Crd:             supergloov1.RoutingRuleCrd,
        Cfg:             cfg,
        SharedCache:     sharedCache,
        SkipCrdCreation: true,
    })
    if err != nil {
        // this should never happen.
        return nil, fmt.Errorf("creating RoutingRule client %v", err)
    }
    if err := routingRuleClient.Register(); err != nil {
        return nil, err
    }

    // remove the supergloo: prefix
    provider = strings.TrimPrefix(provider, "supergloo:")
    // split name.namespace
    parts := strings.Split(provider, ".")
    if len(parts) != 2 {
        return nil, fmt.Errorf("invalid format for supergloo provider")
    }
    targetMesh := solokitcore.ResourceRef{
        Namespace: parts[1],
        Name:      parts[0],
    }
    return NewSuperglooRouterWithClient(ctx, routingRuleClient, targetMesh, logger), nil
}

func NewSuperglooRouterWithClient(ctx context.Context, routingRuleClient supergloov1.RoutingRuleClient, targetMesh solokitcore.ResourceRef, logger *zap.SugaredLogger) *SuperglooRouter {
    return &SuperglooRouter{rrClient: routingRuleClient, logger: logger, targetMesh: targetMesh}
}

// Reconcile creates or updates the Supergloo routing rules
func (sr *SuperglooRouter) Reconcile(canary *flaggerv1.Canary) error {
    if err := sr.setRetries(canary); err != nil {
        return err
    }
    if err := sr.setHeaders(canary); err != nil {
        return err
    }
    if err := sr.setCors(canary); err != nil {
        return err
    }

    // do we have routes already?
    if _, _, err := sr.GetRoutes(canary); err == nil {
        // we have routes, no need to do anything else
        return nil
    } else if solokiterror.IsNotExist(err) {
        return sr.SetRoutes(canary, 100, 0)
    } else {
        return err
    }
}

func (sr *SuperglooRouter) setRetries(canary *flaggerv1.Canary) error {
    if canary.Spec.Service.Retries == nil {
        return nil
    }
    retries, err := convertRetries(canary.Spec.Service.Retries)
    if err != nil {
        return err
    }
    rule := sr.createRule(canary, "retries", &supergloov1.RoutingRuleSpec{
        RuleType: &supergloov1.RoutingRuleSpec_Retries{
            Retries: retries,
        },
    })

    return sr.writeRuleForCanary(canary, rule)
}

func (sr *SuperglooRouter) setHeaders(canary *flaggerv1.Canary) error {
    if canary.Spec.Service.Headers == nil {
        return nil
    }
    headerManipulation, err := convertHeaders(canary.Spec.Service.Headers)
    if err != nil {
        return err
    }
    if headerManipulation == nil {
        return nil
    }
    rule := sr.createRule(canary, "headers", &supergloov1.RoutingRuleSpec{
        RuleType: &supergloov1.RoutingRuleSpec_HeaderManipulation{
            HeaderManipulation: headerManipulation,
        },
    })

    return sr.writeRuleForCanary(canary, rule)
}

func convertHeaders(headers *istiov1alpha3.Headers) (*supergloov1.HeaderManipulation, error) {
    var headersManipulation *supergloov1.HeaderManipulation

    if headers.Request != nil {
        headersManipulation = &supergloov1.HeaderManipulation{}

        headersManipulation.RemoveRequestHeaders = headers.Request.Remove
        headersManipulation.AppendRequestHeaders = make(map[string]string)
        for k, v := range headers.Request.Add {
            headersManipulation.AppendRequestHeaders[k] = v
        }
    }
    if headers.Response != nil {
        if headersManipulation == nil {
            headersManipulation = &supergloov1.HeaderManipulation{}
        }

        headersManipulation.RemoveResponseHeaders = headers.Response.Remove
        headersManipulation.AppendResponseHeaders = make(map[string]string)
        for k, v := range headers.Response.Add {
            headersManipulation.AppendResponseHeaders[k] = v
        }
    }

    return headersManipulation, nil
}

func convertRetries(retries *istiov1alpha3.HTTPRetry) (*supergloov1.RetryPolicy, error) {
    perTryTimeout, err := time.ParseDuration(retries.PerTryTimeout)
    return &supergloov1.RetryPolicy{
        MaxRetries: &supergloov1alpha3.HTTPRetry{
            Attempts:      int32(retries.Attempts),
            PerTryTimeout: types.DurationProto(perTryTimeout),
            RetryOn:       retries.RetryOn,
        },
    }, err
}

func (sr *SuperglooRouter) setCors(canary *flaggerv1.Canary) error {
    corsPolicy := canary.Spec.Service.CorsPolicy
    if corsPolicy == nil {
        return nil
    }
    var maxAgeDuration *types.Duration
    if maxAge, err := time.ParseDuration(corsPolicy.MaxAge); err == nil {
        maxAgeDuration = types.DurationProto(maxAge)
    }

    rule := sr.createRule(canary, "cors", &supergloov1.RoutingRuleSpec{
        RuleType: &supergloov1.RoutingRuleSpec_CorsPolicy{
            CorsPolicy: &supergloov1alpha3.CorsPolicy{
                AllowOrigin:      corsPolicy.AllowOrigin,
                AllowMethods:     corsPolicy.AllowMethods,
                AllowHeaders:     corsPolicy.AllowHeaders,
                ExposeHeaders:    corsPolicy.ExposeHeaders,
                MaxAge:           maxAgeDuration,
                AllowCredentials: &types.BoolValue{Value: corsPolicy.AllowCredentials},
            },
        },
    })
    return sr.writeRuleForCanary(canary, rule)
}

func (sr *SuperglooRouter) createRule(canary *flaggerv1.Canary, namesuffix string, spec *supergloov1.RoutingRuleSpec) *supergloov1.RoutingRule {
    if namesuffix != "" {
        namesuffix = "-" + namesuffix
    }
    return &supergloov1.RoutingRule{
        Metadata: solokitcore.Metadata{
            Name:      canary.Spec.TargetRef.Name + namesuffix,
            Namespace: canary.Namespace,
        },
        TargetMesh: &sr.targetMesh,
        DestinationSelector: &supergloov1.PodSelector{
            SelectorType: &supergloov1.PodSelector_UpstreamSelector_{
                UpstreamSelector: &supergloov1.PodSelector_UpstreamSelector{
                    Upstreams: []solokitcore.ResourceRef{{
                        Name:      upstreamName(canary.Namespace, canary.Spec.TargetRef.Name, canary.Spec.Service.Port),
                        Namespace: sr.targetMesh.Namespace,
                    }},
                },
            },
        },
        Spec: spec,
    }
}

// GetRoutes returns the destinations weight for primary and canary
func (sr *SuperglooRouter) GetRoutes(canary *flaggerv1.Canary) (
    primaryWeight int,
    canaryWeight int,
    err error,
) {
    targetName := canary.Spec.TargetRef.Name
    var rr *supergloov1.RoutingRule
    rr, err = sr.rrClient.Read(canary.Namespace, targetName, solokitclients.ReadOpts{})
    if err != nil {
        return
    }
    traffic := rr.GetSpec().GetTrafficShifting()
    if traffic == nil {
        err = fmt.Errorf("target rule is not for traffic shifting")
        return
    }
    dests := traffic.GetDestinations().GetDestinations()
    for _, dest := range dests {
        if dest.GetDestination().GetUpstream().Name == upstreamName(canary.Namespace, fmt.Sprintf("%s-primary", targetName), canary.Spec.Service.Port) {
            primaryWeight = int(dest.Weight)
        }
        if dest.GetDestination().GetUpstream().Name == upstreamName(canary.Namespace, fmt.Sprintf("%s-canary", targetName), canary.Spec.Service.Port) {
            canaryWeight = int(dest.Weight)
        }
    }

    if primaryWeight == 0 && canaryWeight == 0 {
        err = fmt.Errorf("RoutingRule %s.%s does not contain routes for %s-primary and %s-canary",
            targetName, canary.Namespace, targetName, targetName)
    }

    return
}

func upstreamName(serviceNamespace, serviceName string, port int32) string {
    return fmt.Sprintf("%s-%s-%d", serviceNamespace, serviceName, port)
}

// SetRoutes updates the destinations weight for primary and canary
func (sr *SuperglooRouter) SetRoutes(
    canary *flaggerv1.Canary,
    primaryWeight int,
    canaryWeight int,
) error {
    // upstreams live in the target mesh namespace and follow the
    // <namespace>-<service>-<port> naming convention (see upstreamName)
    targetName := canary.Spec.TargetRef.Name

    destinations := []*gloov1.WeightedDestination{}
    if primaryWeight != 0 {
        destinations = append(destinations, &gloov1.WeightedDestination{
            Destination: &gloov1.Destination{
                Upstream: solokitcore.ResourceRef{
                    Name:      upstreamName(canary.Namespace, fmt.Sprintf("%s-primary", targetName), canary.Spec.Service.Port),
                    Namespace: sr.targetMesh.Namespace,
                },
            },
            Weight: uint32(primaryWeight),
        })
    }

    if canaryWeight != 0 {
        destinations = append(destinations, &gloov1.WeightedDestination{
            Destination: &gloov1.Destination{
                Upstream: solokitcore.ResourceRef{
                    Name:      upstreamName(canary.Namespace, fmt.Sprintf("%s-canary", targetName), canary.Spec.Service.Port),
                    Namespace: sr.targetMesh.Namespace,
                },
            },
            Weight: uint32(canaryWeight),
        })
    }

    if len(destinations) == 0 {
        return fmt.Errorf("RoutingRule %s.%s update failed: no valid weights", targetName, canary.Namespace)
    }

    rule := sr.createRule(canary, "", &supergloov1.RoutingRuleSpec{
        RuleType: &supergloov1.RoutingRuleSpec_TrafficShifting{
            TrafficShifting: &supergloov1.TrafficShifting{
                Destinations: &gloov1.MultiDestination{
                    Destinations: destinations,
                },
            },
        },
    })

    return sr.writeRuleForCanary(canary, rule)
}

func (sr *SuperglooRouter) writeRuleForCanary(canary *flaggerv1.Canary, rule *supergloov1.RoutingRule) error {
    targetName := canary.Spec.TargetRef.Name

    if oldRr, err := sr.rrClient.Read(rule.Metadata.Namespace, rule.Metadata.Name, solokitclients.ReadOpts{}); err != nil {
        // ignore not exist errors..
        if !solokiterror.IsNotExist(err) {
            return fmt.Errorf("RoutingRule %s.%s read failed: %v", targetName, canary.Namespace, err)
        }
    } else {
        rule.Metadata.ResourceVersion = oldRr.Metadata.ResourceVersion
        // if the old and the new one are equal, no need to do anything.
        oldRr.Status = solokitcore.Status{}
        if oldRr.Equal(rule) {
            return nil
        }
    }

    kubeWriteOpts := &kube.KubeWriteOpts{
        PreWriteCallback: func(r *crdv1.Resource) {
            r.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
                *metav1.NewControllerRef(canary, schema.GroupVersionKind{
                    Group:   flaggerv1.SchemeGroupVersion.Group,
                    Version: flaggerv1.SchemeGroupVersion.Version,
                    Kind:    flaggerv1.CanaryKind,
                }),
            }
        },
    }
    writeOpts := solokitclients.WriteOpts{OverwriteExisting: true, StorageWriteOpts: kubeWriteOpts}
    _, err := sr.rrClient.Write(rule, writeOpts)
    if err != nil {
        return fmt.Errorf("RoutingRule %s.%s update failed: %v", targetName, canary.Namespace, err)
    }
    return nil
}
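The upstreamName convention above is what ties routing rule destinations back to the primary and canary services; a worked example with the values used in the tests below:

fmt.Println(upstreamName("default", "podinfo-primary", 9898)) // default-podinfo-primary-9898
fmt.Println(upstreamName("default", "podinfo-canary", 9898))  // default-podinfo-canary-9898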
pkg/router/supergloo_test.go (new file, 149 lines)
@@ -0,0 +1,149 @@
package router

import (
    "context"
    "fmt"
    "testing"

    gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
    solokitclients "github.com/solo-io/solo-kit/pkg/api/v1/clients"
    "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory"
    solokitmemory "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory"
    solokitcore "github.com/solo-io/solo-kit/pkg/api/v1/resources/core"
    supergloov1 "github.com/solo-io/supergloo/pkg/api/v1"
)

func TestSuperglooRouter_Sync(t *testing.T) {
    mocks := setupfakeClients()

    routingRuleClient, err := supergloov1.NewRoutingRuleClient(&factory.MemoryResourceClientFactory{
        Cache: solokitmemory.NewInMemoryResourceCache(),
    })
    if err != nil {
        t.Fatal(err.Error())
    }
    if err := routingRuleClient.Register(); err != nil {
        t.Fatal(err.Error())
    }
    targetMesh := solokitcore.ResourceRef{
        Namespace: "supergloo-system",
        Name:      "mesh",
    }
    router := NewSuperglooRouterWithClient(context.TODO(), routingRuleClient, targetMesh, mocks.logger)
    err = router.Reconcile(mocks.canary)
    if err != nil {
        t.Fatal(err.Error())
    }

    // test insert
    rr, err := routingRuleClient.Read("default", "podinfo", solokitclients.ReadOpts{})
    if err != nil {
        t.Fatal(err.Error())
    }
    dests := rr.Spec.GetTrafficShifting().GetDestinations().GetDestinations()
    if len(dests) != 1 {
        t.Errorf("Got RoutingRule Destinations %v wanted %v", len(dests), 1)
    }
}

func TestSuperglooRouter_SetRoutes(t *testing.T) {
    mocks := setupfakeClients()

    routingRuleClient, err := supergloov1.NewRoutingRuleClient(&factory.MemoryResourceClientFactory{
        Cache: solokitmemory.NewInMemoryResourceCache(),
    })
    if err != nil {
        t.Fatal(err.Error())
    }
    if err := routingRuleClient.Register(); err != nil {
        t.Fatal(err.Error())
    }
    targetMesh := solokitcore.ResourceRef{
        Namespace: "supergloo-system",
        Name:      "mesh",
    }
    router := NewSuperglooRouterWithClient(context.TODO(), routingRuleClient, targetMesh, mocks.logger)

    err = router.Reconcile(mocks.canary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p, c, err := router.GetRoutes(mocks.canary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p = 50
    c = 50

    err = router.SetRoutes(mocks.canary, p, c)
    if err != nil {
        t.Fatal(err.Error())
    }

    rr, err := routingRuleClient.Read("default", "podinfo", solokitclients.ReadOpts{})
    if err != nil {
        t.Fatal(err.Error())
    }

    var pRoute *gloov1.WeightedDestination
    var cRoute *gloov1.WeightedDestination
    targetName := mocks.canary.Spec.TargetRef.Name

    for _, dest := range rr.GetSpec().GetTrafficShifting().GetDestinations().GetDestinations() {
        if dest.GetDestination().GetUpstream().Name == upstreamName(mocks.canary.Namespace, fmt.Sprintf("%s-primary", targetName), mocks.canary.Spec.Service.Port) {
            pRoute = dest
        }
        if dest.GetDestination().GetUpstream().Name == upstreamName(mocks.canary.Namespace, fmt.Sprintf("%s-canary", targetName), mocks.canary.Spec.Service.Port) {
            cRoute = dest
        }
    }

    if pRoute.Weight != uint32(p) {
        t.Errorf("Got primary weight %v wanted %v", pRoute.Weight, p)
    }

    if cRoute.Weight != uint32(c) {
        t.Errorf("Got canary weight %v wanted %v", cRoute.Weight, c)
    }
}

func TestSuperglooRouter_GetRoutes(t *testing.T) {
    mocks := setupfakeClients()

    routingRuleClient, err := supergloov1.NewRoutingRuleClient(&factory.MemoryResourceClientFactory{
        Cache: solokitmemory.NewInMemoryResourceCache(),
    })
    if err != nil {
        t.Fatal(err.Error())
    }
    if err := routingRuleClient.Register(); err != nil {
        t.Fatal(err.Error())
    }
    targetMesh := solokitcore.ResourceRef{
        Namespace: "supergloo-system",
        Name:      "mesh",
    }
    router := NewSuperglooRouterWithClient(context.TODO(), routingRuleClient, targetMesh, mocks.logger)
    err = router.Reconcile(mocks.canary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p, c, err := router.GetRoutes(mocks.canary)
    if err != nil {
        t.Fatal(err.Error())
    }

    if p != 100 {
        t.Errorf("Got primary weight %v wanted %v", p, 100)
    }

    if c != 0 {
        t.Errorf("Got canary weight %v wanted %v", c, 0)
    }
}
@@ -1,4 +1,4 @@
package version

var VERSION = "0.11.1"
var VERSION = "0.13.2"
var REVISION = "unknown"
@@ -1,4 +0,0 @@
FROM golang:1.11

RUN go get -u sigs.k8s.io/kind
@@ -2,7 +2,7 @@

The e2e testing infrastructure is powered by CircleCI and [Kubernetes Kind](https://github.com/kubernetes-sigs/kind).

CircleCI e2e workflow:
### CircleCI e2e Istio workflow

* install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
* install Kubernetes Kind [e2e-kind.sh](e2e-kind.sh)
@@ -21,4 +21,20 @@ CircleCI e2e workflow:
* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-tests.sh](e2e-tests.sh)
* test the A/B testing analysis and promotion using cookies filters and pre/post rollout webhooks [e2e-tests.sh](e2e-tests.sh)

### CircleCI e2e NGINX ingress workflow

* install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
* install Kubernetes Kind [e2e-kind.sh](e2e-kind.sh)
* create a local Kubernetes cluster with kind [e2e-kind.sh](e2e-kind.sh)
* install latest stable Helm CLI [e2e-nginx.sh](e2e-nginx.sh)
* deploy Tiller on the local cluster [e2e-nginx.sh](e2e-nginx.sh)
* install NGINX ingress with Helm [e2e-nginx.sh](e2e-nginx.sh)
* build the Flagger container image [e2e-nginx-build.sh](e2e-nginx-build.sh)
* load the Flagger image onto the local cluster [e2e-nginx-build.sh](e2e-nginx-build.sh)
* install Flagger and Prometheus in the ingress-nginx namespace [e2e-nginx-build.sh](e2e-nginx-build.sh)
* create a test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
* deploy the load tester in the test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
* deploy the demo workload (podinfo) and ingress in the test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
* test the canary initialization [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
* test the A/B testing analysis and promotion using header filters and pre/post rollout webhooks [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
@@ -11,5 +11,10 @@ cd ${REPO_ROOT} && docker build -t test/flagger:latest . -f Dockerfile
echo '>>> Installing Flagger'
kind load docker-image test/flagger:latest
kubectl apply -f ${REPO_ROOT}/artifacts/flagger/

if [ -n "$1" ]; then
  kubectl -n istio-system set env deployment/flagger "MESH_PROVIDER=$1"
fi

kubectl -n istio-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n istio-system rollout status deployment/flagger
test/e2e-ingress.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - backend:
              serviceName: podinfo
              servicePort: 9898
test/e2e-nginx-build.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Building Flagger'
cd ${REPO_ROOT} && docker build -t test/flagger:latest . -f Dockerfile

echo '>>> Loading Flagger image onto the cluster'
kind load docker-image test/flagger:latest

echo '>>> Installing Flagger'
helm upgrade -i flagger ${REPO_ROOT}/charts/flagger \
  --wait \
  --namespace ingress-nginx \
  --set prometheus.install=true \
  --set meshProvider=nginx

kubectl -n ingress-nginx set image deployment/flagger flagger=test/flagger:latest

kubectl -n ingress-nginx rollout status deployment/flagger
kubectl -n ingress-nginx rollout status deployment/flagger-prometheus
test/e2e-nginx-tests.sh (new executable file, 194 lines)
@@ -0,0 +1,194 @@
#!/usr/bin/env bash

# This script runs e2e tests for Canary initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Helm and NGINX ingress controller

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Creating test namespace'
kubectl create namespace test

echo '>>> Installing load tester'
kubectl -n test apply -f ${REPO_ROOT}/artifacts/loadtester/
kubectl -n test rollout status deployment/flagger-loadtester

echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
kubectl apply -f ${REPO_ROOT}/test/e2e-ingress.yaml

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 15s
    threshold: 15
    maxWeight: 30
    stepWeight: 10
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    - name: "latency"
      threshold: 0.5
      interval: 1m
      query: |
        histogram_quantile(0.99,
          sum(
            rate(
              http_request_duration_seconds_bucket{
                kubernetes_namespace="test",
                kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
              }[1m]
            )
          ) by (le)
        )
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 10m -q 10 -c 2 -host app.example.com http://nginx-ingress-controller.ingress-nginx"
        logCmdOutput: "true"
EOF

echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n ingress-nginx logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary initialization test passed'

echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.1

echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '1.4.1' && ok=true || ok=false
    sleep 10
    kubectl -n ingress-nginx logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n ingress-nginx logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary promotion test passed'

if [ "$1" = "canary" ]; then
  exit 0
fi

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 10s
    threshold: 5
    iterations: 5
    match:
    - headers:
        x-canary:
          exact: "insider"
    - headers:
        cookie:
          exact: "canary"
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    webhooks:
    - name: pre
      type: pre-rollout
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 10m -q 10 -c 2 -H 'X-Canary: insider' -host app.example.com http://nginx-ingress-controller.ingress-nginx"
        logCmdOutput: "true"
    - name: post
      type: post-rollout
      url: http://flagger-loadtester.test/
      timeout: 15s
      metadata:
        type: cmd
        cmd: "curl -sH 'Host: app.example.com' http://nginx-ingress-controller.ingress-nginx"
        logCmdOutput: "true"
EOF

echo '>>> Triggering A/B testing'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.2

echo '>>> Waiting for A/B testing promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '1.4.2' && ok=true || ok=false
    sleep 10
    kubectl -n ingress-nginx logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n ingress-nginx logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ A/B testing promotion test passed'

kubectl -n ingress-nginx logs deployment/flagger

echo '✔ All tests passed'
29  test/e2e-nginx.sh  Executable file
@@ -0,0 +1,29 @@
#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo ">>> Installing Helm"
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash

echo '>>> Installing Tiller'
kubectl --namespace kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller --upgrade --wait

echo '>>> Installing NGINX Ingress'
helm upgrade -i nginx-ingress stable/nginx-ingress \
  --wait \
  --namespace ingress-nginx \
  --set controller.stats.enabled=true \
  --set controller.metrics.enabled=true \
  --set controller.podAnnotations."prometheus\.io/scrape"=true \
  --set controller.podAnnotations."prometheus\.io/port"=10254 \
  --set controller.service.type=NodePort

kubectl -n ingress-nginx rollout status deployment/nginx-ingress-controller
kubectl -n ingress-nginx get all
34  test/e2e-supergloo.sh  Executable file
@@ -0,0 +1,34 @@
#!/usr/bin/env bash

set -o errexit

ISTIO_VER="1.0.6"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo ">>> Downloading Supergloo CLI"
curl -SsL https://github.com/solo-io/supergloo/releases/download/v0.3.13/supergloo-cli-linux-amd64 > supergloo-cli
chmod +x supergloo-cli

echo ">>> Installing Supergloo"
./supergloo-cli init

echo ">>> Installing Istio ${ISTIO_VER}"
kubectl create ns istio-system
./supergloo-cli install istio --name test --namespace supergloo-system --auto-inject=true --installation-namespace istio-system --mtls=false --prometheus=true --version ${ISTIO_VER}

echo '>>> Waiting for Istio to be ready'
until kubectl -n supergloo-system get mesh test
do
    sleep 2
done

# add rbac rules
kubectl create clusterrolebinding flagger-supergloo --clusterrole=mesh-discovery --serviceaccount=istio-system:flagger

kubectl -n istio-system rollout status deployment/istio-pilot
kubectl -n istio-system rollout status deployment/istio-policy
kubectl -n istio-system rollout status deployment/istio-sidecar-injector
kubectl -n istio-system rollout status deployment/istio-telemetry
kubectl -n istio-system rollout status deployment/prometheus

kubectl -n istio-system get all
@@ -125,6 +125,10 @@ done

echo '✔ Canary promotion test passed'

if [ "$1" = "canary" ]; then
    exit 0
fi

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
20
vendor/github.com/armon/go-metrics/LICENSE
generated
vendored
Normal file
20
vendor/github.com/armon/go-metrics/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Armon Dadgar
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
12
vendor/github.com/armon/go-metrics/const_unix.go
generated
vendored
Normal file
12
vendor/github.com/armon/go-metrics/const_unix.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build !windows
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSignal is used with DefaultInmemSignal
|
||||
DefaultSignal = syscall.SIGUSR1
|
||||
)
|
||||
13
vendor/github.com/armon/go-metrics/const_windows.go
generated
vendored
Normal file
13
vendor/github.com/armon/go-metrics/const_windows.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build windows
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSignal is used with DefaultInmemSignal
|
||||
// Windows has no SIGUSR1, use SIGBREAK
|
||||
DefaultSignal = syscall.Signal(21)
|
||||
)
|
||||
348
vendor/github.com/armon/go-metrics/inmem.go
generated
vendored
Normal file
348
vendor/github.com/armon/go-metrics/inmem.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// InmemSink provides a MetricSink that does in-memory aggregation
|
||||
// without sending metrics over a network. It can be embedded within
|
||||
// an application to provide profiling information.
|
||||
type InmemSink struct {
|
||||
// How long is each aggregation interval
|
||||
interval time.Duration
|
||||
|
||||
// Retain controls how many metrics interval we keep
|
||||
retain time.Duration
|
||||
|
||||
// maxIntervals is the maximum length of intervals.
|
||||
// It is retain / interval.
|
||||
maxIntervals int
|
||||
|
||||
// intervals is a slice of the retained intervals
|
||||
intervals []*IntervalMetrics
|
||||
intervalLock sync.RWMutex
|
||||
|
||||
rateDenom float64
|
||||
}
|
||||
|
||||
// IntervalMetrics stores the aggregated metrics
|
||||
// for a specific interval
|
||||
type IntervalMetrics struct {
|
||||
sync.RWMutex
|
||||
|
||||
// The start time of the interval
|
||||
Interval time.Time
|
||||
|
||||
// Gauges maps the key to the last set value
|
||||
Gauges map[string]GaugeValue
|
||||
|
||||
// Points maps the string to the list of emitted values
|
||||
// from EmitKey
|
||||
Points map[string][]float32
|
||||
|
||||
// Counters maps the string key to a sum of the counter
|
||||
// values
|
||||
Counters map[string]SampledValue
|
||||
|
||||
// Samples maps the key to an AggregateSample,
|
||||
// which has the rolled up view of a sample
|
||||
Samples map[string]SampledValue
|
||||
}
|
||||
|
||||
// NewIntervalMetrics creates a new IntervalMetrics for a given interval
|
||||
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
|
||||
return &IntervalMetrics{
|
||||
Interval: intv,
|
||||
Gauges: make(map[string]GaugeValue),
|
||||
Points: make(map[string][]float32),
|
||||
Counters: make(map[string]SampledValue),
|
||||
Samples: make(map[string]SampledValue),
|
||||
}
|
||||
}
|
||||
|
||||
// AggregateSample is used to hold aggregate metrics
|
||||
// about a sample
|
||||
type AggregateSample struct {
|
||||
Count int // The count of emitted pairs
|
||||
Rate float64 // The values rate per time unit (usually 1 second)
|
||||
Sum float64 // The sum of values
|
||||
SumSq float64 `json:"-"` // The sum of squared values
|
||||
Min float64 // Minimum value
|
||||
Max float64 // Maximum value
|
||||
LastUpdated time.Time `json:"-"` // When value was last updated
|
||||
}
|
||||
|
||||
// Computes a Stddev of the values
|
||||
func (a *AggregateSample) Stddev() float64 {
|
||||
num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
|
||||
div := float64(a.Count * (a.Count - 1))
|
||||
if div == 0 {
|
||||
return 0
|
||||
}
|
||||
return math.Sqrt(num / div)
|
||||
}
|
||||
|
||||
// Computes a mean of the values
|
||||
func (a *AggregateSample) Mean() float64 {
|
||||
if a.Count == 0 {
|
||||
return 0
|
||||
}
|
||||
return a.Sum / float64(a.Count)
|
||||
}
|
||||
|
||||
// Ingest is used to update a sample
|
||||
func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
|
||||
a.Count++
|
||||
a.Sum += v
|
||||
a.SumSq += (v * v)
|
||||
if v < a.Min || a.Count == 1 {
|
||||
a.Min = v
|
||||
}
|
||||
if v > a.Max || a.Count == 1 {
|
||||
a.Max = v
|
||||
}
|
||||
a.Rate = float64(a.Sum) / rateDenom
|
||||
a.LastUpdated = time.Now()
|
||||
}
|
||||
|
||||
func (a *AggregateSample) String() string {
|
||||
if a.Count == 0 {
|
||||
return "Count: 0"
|
||||
} else if a.Stddev() == 0 {
|
||||
return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
|
||||
} else {
|
||||
return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
|
||||
a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
|
||||
}
|
||||
}
|
||||
|
||||
// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
params := u.Query()
|
||||
|
||||
interval, err := time.ParseDuration(params.Get("interval"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad 'interval' param: %s", err)
|
||||
}
|
||||
|
||||
retain, err := time.ParseDuration(params.Get("retain"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad 'retain' param: %s", err)
|
||||
}
|
||||
|
||||
return NewInmemSink(interval, retain), nil
|
||||
}
|
||||
|
||||
// NewInmemSink is used to construct a new in-memory sink.
|
||||
// Uses an aggregation interval and maximum retention period.
|
||||
func NewInmemSink(interval, retain time.Duration) *InmemSink {
|
||||
rateTimeUnit := time.Second
|
||||
i := &InmemSink{
|
||||
interval: interval,
|
||||
retain: retain,
|
||||
maxIntervals: int(retain / interval),
|
||||
rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
|
||||
}
|
||||
i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *InmemSink) SetGauge(key []string, val float32) {
|
||||
i.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
|
||||
}
|
||||
|
||||
func (i *InmemSink) EmitKey(key []string, val float32) {
|
||||
k := i.flattenKey(key)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
vals := intv.Points[k]
|
||||
intv.Points[k] = append(vals, val)
|
||||
}
|
||||
|
||||
func (i *InmemSink) IncrCounter(key []string, val float32) {
|
||||
i.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
|
||||
agg, ok := intv.Counters[k]
|
||||
if !ok {
|
||||
agg = SampledValue{
|
||||
Name: name,
|
||||
AggregateSample: &AggregateSample{},
|
||||
Labels: labels,
|
||||
}
|
||||
intv.Counters[k] = agg
|
||||
}
|
||||
agg.Ingest(float64(val), i.rateDenom)
|
||||
}
|
||||
|
||||
func (i *InmemSink) AddSample(key []string, val float32) {
|
||||
i.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
|
||||
agg, ok := intv.Samples[k]
|
||||
if !ok {
|
||||
agg = SampledValue{
|
||||
Name: name,
|
||||
AggregateSample: &AggregateSample{},
|
||||
Labels: labels,
|
||||
}
|
||||
intv.Samples[k] = agg
|
||||
}
|
||||
agg.Ingest(float64(val), i.rateDenom)
|
||||
}
|
||||
|
||||
// Data is used to retrieve all the aggregated metrics
|
||||
// Intervals may be in use, and a read lock should be acquired
|
||||
func (i *InmemSink) Data() []*IntervalMetrics {
|
||||
// Get the current interval, forces creation
|
||||
i.getInterval()
|
||||
|
||||
i.intervalLock.RLock()
|
||||
defer i.intervalLock.RUnlock()
|
||||
|
||||
n := len(i.intervals)
|
||||
intervals := make([]*IntervalMetrics, n)
|
||||
|
||||
copy(intervals[:n-1], i.intervals[:n-1])
|
||||
current := i.intervals[n-1]
|
||||
|
||||
// make its own copy for current interval
|
||||
intervals[n-1] = &IntervalMetrics{}
|
||||
copyCurrent := intervals[n-1]
|
||||
current.RLock()
|
||||
*copyCurrent = *current
|
||||
|
||||
copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
|
||||
for k, v := range current.Gauges {
|
||||
copyCurrent.Gauges[k] = v
|
||||
}
|
||||
// saved values will be not change, just copy its link
|
||||
copyCurrent.Points = make(map[string][]float32, len(current.Points))
|
||||
for k, v := range current.Points {
|
||||
copyCurrent.Points[k] = v
|
||||
}
|
||||
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
|
||||
for k, v := range current.Counters {
|
||||
copyCurrent.Counters[k] = v
|
||||
}
|
||||
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
|
||||
for k, v := range current.Samples {
|
||||
copyCurrent.Samples[k] = v
|
||||
}
|
||||
current.RUnlock()
|
||||
|
||||
return intervals
|
||||
}
|
||||
|
||||
func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {
|
||||
i.intervalLock.RLock()
|
||||
defer i.intervalLock.RUnlock()
|
||||
|
||||
n := len(i.intervals)
|
||||
if n > 0 && i.intervals[n-1].Interval == intv {
|
||||
return i.intervals[n-1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {
|
||||
i.intervalLock.Lock()
|
||||
defer i.intervalLock.Unlock()
|
||||
|
||||
// Check for an existing interval
|
||||
n := len(i.intervals)
|
||||
if n > 0 && i.intervals[n-1].Interval == intv {
|
||||
return i.intervals[n-1]
|
||||
}
|
||||
|
||||
// Add the current interval
|
||||
current := NewIntervalMetrics(intv)
|
||||
i.intervals = append(i.intervals, current)
|
||||
n++
|
||||
|
||||
// Truncate the intervals if they are too long
|
||||
if n >= i.maxIntervals {
|
||||
copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
|
||||
i.intervals = i.intervals[:i.maxIntervals]
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
// getInterval returns the current interval to write to
|
||||
func (i *InmemSink) getInterval() *IntervalMetrics {
|
||||
intv := time.Now().Truncate(i.interval)
|
||||
if m := i.getExistingInterval(intv); m != nil {
|
||||
return m
|
||||
}
|
||||
return i.createInterval(intv)
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (i *InmemSink) flattenKey(parts []string) string {
|
||||
buf := &bytes.Buffer{}
|
||||
replacer := strings.NewReplacer(" ", "_")
|
||||
|
||||
if len(parts) > 0 {
|
||||
replacer.WriteString(buf, parts[0])
|
||||
}
|
||||
for _, part := range parts[1:] {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, part)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Flattens the key for formatting along with its labels, removes spaces
|
||||
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
|
||||
buf := &bytes.Buffer{}
|
||||
replacer := strings.NewReplacer(" ", "_")
|
||||
|
||||
if len(parts) > 0 {
|
||||
replacer.WriteString(buf, parts[0])
|
||||
}
|
||||
for _, part := range parts[1:] {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, part)
|
||||
}
|
||||
|
||||
key := buf.String()
|
||||
|
||||
for _, label := range labels {
|
||||
replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
|
||||
}
|
||||
|
||||
return buf.String(), key
|
||||
}
|
||||
118
vendor/github.com/armon/go-metrics/inmem_endpoint.go
generated
vendored
Normal file
118
vendor/github.com/armon/go-metrics/inmem_endpoint.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MetricsSummary holds a roll-up of metrics info for a given interval
|
||||
type MetricsSummary struct {
|
||||
Timestamp string
|
||||
Gauges []GaugeValue
|
||||
Points []PointValue
|
||||
Counters []SampledValue
|
||||
Samples []SampledValue
|
||||
}
|
||||
|
||||
type GaugeValue struct {
|
||||
Name string
|
||||
Hash string `json:"-"`
|
||||
Value float32
|
||||
|
||||
Labels []Label `json:"-"`
|
||||
DisplayLabels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
type PointValue struct {
|
||||
Name string
|
||||
Points []float32
|
||||
}
|
||||
|
||||
type SampledValue struct {
|
||||
Name string
|
||||
Hash string `json:"-"`
|
||||
*AggregateSample
|
||||
Mean float64
|
||||
Stddev float64
|
||||
|
||||
Labels []Label `json:"-"`
|
||||
DisplayLabels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
|
||||
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
data := i.Data()
|
||||
|
||||
var interval *IntervalMetrics
|
||||
n := len(data)
|
||||
switch {
|
||||
case n == 0:
|
||||
return nil, fmt.Errorf("no metric intervals have been initialized yet")
|
||||
case n == 1:
|
||||
// Show the current interval if it's all we have
|
||||
interval = i.intervals[0]
|
||||
default:
|
||||
// Show the most recent finished interval if we have one
|
||||
interval = i.intervals[n-2]
|
||||
}
|
||||
|
||||
summary := MetricsSummary{
|
||||
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
|
||||
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
|
||||
Points: make([]PointValue, 0, len(interval.Points)),
|
||||
}
|
||||
|
||||
// Format and sort the output of each metric type, so it gets displayed in a
|
||||
// deterministic order.
|
||||
for name, points := range interval.Points {
|
||||
summary.Points = append(summary.Points, PointValue{name, points})
|
||||
}
|
||||
sort.Slice(summary.Points, func(i, j int) bool {
|
||||
return summary.Points[i].Name < summary.Points[j].Name
|
||||
})
|
||||
|
||||
for hash, value := range interval.Gauges {
|
||||
value.Hash = hash
|
||||
value.DisplayLabels = make(map[string]string)
|
||||
for _, label := range value.Labels {
|
||||
value.DisplayLabels[label.Name] = label.Value
|
||||
}
|
||||
value.Labels = nil
|
||||
|
||||
summary.Gauges = append(summary.Gauges, value)
|
||||
}
|
||||
sort.Slice(summary.Gauges, func(i, j int) bool {
|
||||
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
|
||||
})
|
||||
|
||||
summary.Counters = formatSamples(interval.Counters)
|
||||
summary.Samples = formatSamples(interval.Samples)
|
||||
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
func formatSamples(source map[string]SampledValue) []SampledValue {
|
||||
output := make([]SampledValue, 0, len(source))
|
||||
for hash, sample := range source {
|
||||
displayLabels := make(map[string]string)
|
||||
for _, label := range sample.Labels {
|
||||
displayLabels[label.Name] = label.Value
|
||||
}
|
||||
|
||||
output = append(output, SampledValue{
|
||||
Name: sample.Name,
|
||||
Hash: hash,
|
||||
AggregateSample: sample.AggregateSample,
|
||||
Mean: sample.AggregateSample.Mean(),
|
||||
Stddev: sample.AggregateSample.Stddev(),
|
||||
DisplayLabels: displayLabels,
|
||||
})
|
||||
}
|
||||
sort.Slice(output, func(i, j int) bool {
|
||||
return output[i].Hash < output[j].Hash
|
||||
})
|
||||
|
||||
return output
|
||||
}
|
||||
117
vendor/github.com/armon/go-metrics/inmem_signal.go
generated
vendored
Normal file
117
vendor/github.com/armon/go-metrics/inmem_signal.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// InmemSignal is used to listen for a given signal, and when received,
|
||||
// to dump the current metrics from the InmemSink to an io.Writer
|
||||
type InmemSignal struct {
|
||||
signal syscall.Signal
|
||||
inm *InmemSink
|
||||
w io.Writer
|
||||
sigCh chan os.Signal
|
||||
|
||||
stop bool
|
||||
stopCh chan struct{}
|
||||
stopLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewInmemSignal creates a new InmemSignal which listens for a given signal,
|
||||
// and dumps the current metrics out to a writer
|
||||
func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
|
||||
i := &InmemSignal{
|
||||
signal: sig,
|
||||
inm: inmem,
|
||||
w: w,
|
||||
sigCh: make(chan os.Signal, 1),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
signal.Notify(i.sigCh, sig)
|
||||
go i.run()
|
||||
return i
|
||||
}
|
||||
|
||||
// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
|
||||
// and writes output to stderr. Windows uses SIGBREAK
|
||||
func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
|
||||
return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
|
||||
}
|
||||
|
||||
// Stop is used to stop the InmemSignal from listening
|
||||
func (i *InmemSignal) Stop() {
|
||||
i.stopLock.Lock()
|
||||
defer i.stopLock.Unlock()
|
||||
|
||||
if i.stop {
|
||||
return
|
||||
}
|
||||
i.stop = true
|
||||
close(i.stopCh)
|
||||
signal.Stop(i.sigCh)
|
||||
}
|
||||
|
||||
// run is a long running routine that handles signals
|
||||
func (i *InmemSignal) run() {
|
||||
for {
|
||||
select {
|
||||
case <-i.sigCh:
|
||||
i.dumpStats()
|
||||
case <-i.stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dumpStats is used to dump the data to output writer
|
||||
func (i *InmemSignal) dumpStats() {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
data := i.inm.Data()
|
||||
// Skip the last period which is still being aggregated
|
||||
for j := 0; j < len(data)-1; j++ {
|
||||
intv := data[j]
|
||||
intv.RLock()
|
||||
for _, val := range intv.Gauges {
|
||||
name := i.flattenLabels(val.Name, val.Labels)
|
||||
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
|
||||
}
|
||||
for name, vals := range intv.Points {
|
||||
for _, val := range vals {
|
||||
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
|
||||
}
|
||||
}
|
||||
for _, agg := range intv.Counters {
|
||||
name := i.flattenLabels(agg.Name, agg.Labels)
|
||||
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
|
||||
}
|
||||
for _, agg := range intv.Samples {
|
||||
name := i.flattenLabels(agg.Name, agg.Labels)
|
||||
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
|
||||
}
|
||||
intv.RUnlock()
|
||||
}
|
||||
|
||||
// Write out the bytes
|
||||
i.w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// Flattens the key for formatting along with its labels, removes spaces
|
||||
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
|
||||
buf := bytes.NewBufferString(name)
|
||||
replacer := strings.NewReplacer(" ", "_", ":", "_")
|
||||
|
||||
for _, label := range labels {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, label.Value)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
278
vendor/github.com/armon/go-metrics/metrics.go
generated
vendored
Normal file
278
vendor/github.com/armon/go-metrics/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,278 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
type Label struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (m *Metrics) SetGauge(key []string, val float32) {
|
||||
m.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" {
|
||||
if m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
} else if m.EnableHostname {
|
||||
key = insert(0, m.HostName, key)
|
||||
}
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "gauge", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) EmitKey(key []string, val float32) {
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "kv", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
allowed, _ := m.allowMetric(key, nil)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.EmitKey(key, val)
|
||||
}
|
||||
|
||||
func (m *Metrics) IncrCounter(key []string, val float32) {
|
||||
m.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "counter", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) AddSample(key []string, val float32) {
|
||||
m.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "sample", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.AddSampleWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) MeasureSince(key []string, start time.Time) {
|
||||
m.MeasureSinceWithLabels(key, start, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "timer", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
elapsed := now.Sub(start)
|
||||
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
|
||||
m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
|
||||
}
|
||||
|
||||
// UpdateFilter overwrites the existing filter with the given rules.
|
||||
func (m *Metrics) UpdateFilter(allow, block []string) {
|
||||
m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
|
||||
}
|
||||
|
||||
// UpdateFilterAndLabels overwrites the existing filter with the given rules.
|
||||
func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
|
||||
m.filterLock.Lock()
|
||||
defer m.filterLock.Unlock()
|
||||
|
||||
m.AllowedPrefixes = allow
|
||||
m.BlockedPrefixes = block
|
||||
|
||||
if allowedLabels == nil {
|
||||
// Having a white list means we take only elements from it
|
||||
m.allowedLabels = nil
|
||||
} else {
|
||||
m.allowedLabels = make(map[string]bool)
|
||||
for _, v := range allowedLabels {
|
||||
m.allowedLabels[v] = true
|
||||
}
|
||||
}
|
||||
m.blockedLabels = make(map[string]bool)
|
||||
for _, v := range blockedLabels {
|
||||
m.blockedLabels[v] = true
|
||||
}
|
||||
m.AllowedLabels = allowedLabels
|
||||
m.BlockedLabels = blockedLabels
|
||||
|
||||
m.filter = iradix.New()
|
||||
for _, prefix := range m.AllowedPrefixes {
|
||||
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
|
||||
}
|
||||
for _, prefix := range m.BlockedPrefixes {
|
||||
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
|
||||
}
|
||||
}
|
||||
|
||||
// labelIsAllowed return true if a should be included in metric
|
||||
// the caller should lock m.filterLock while calling this method
|
||||
func (m *Metrics) labelIsAllowed(label *Label) bool {
|
||||
labelName := (*label).Name
|
||||
if m.blockedLabels != nil {
|
||||
_, ok := m.blockedLabels[labelName]
|
||||
if ok {
|
||||
// If present, let's remove this label
|
||||
return false
|
||||
}
|
||||
}
|
||||
if m.allowedLabels != nil {
|
||||
_, ok := m.allowedLabels[labelName]
|
||||
return ok
|
||||
}
|
||||
// Allow by default
|
||||
return true
|
||||
}
|
||||
|
||||
// filterLabels return only allowed labels
|
||||
// the caller should lock m.filterLock while calling this method
|
||||
func (m *Metrics) filterLabels(labels []Label) []Label {
|
||||
if labels == nil {
|
||||
return nil
|
||||
}
|
||||
toReturn := labels[:0]
|
||||
for _, label := range labels {
|
||||
if m.labelIsAllowed(&label) {
|
||||
toReturn = append(toReturn, label)
|
||||
}
|
||||
}
|
||||
return toReturn
|
||||
}
|
||||
|
||||
// Returns whether the metric should be allowed based on configured prefix filters
|
||||
// Also return the applicable labels
|
||||
func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
|
||||
m.filterLock.RLock()
|
||||
defer m.filterLock.RUnlock()
|
||||
|
||||
if m.filter == nil || m.filter.Len() == 0 {
|
||||
return m.Config.FilterDefault, m.filterLabels(labels)
|
||||
}
|
||||
|
||||
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
|
||||
if !ok {
|
||||
return m.Config.FilterDefault, m.filterLabels(labels)
|
||||
}
|
||||
|
||||
return allowed.(bool), m.filterLabels(labels)
|
||||
}
|
||||
|
||||
// Periodically collects runtime stats to publish
|
||||
func (m *Metrics) collectStats() {
|
||||
for {
|
||||
time.Sleep(m.ProfileInterval)
|
||||
m.emitRuntimeStats()
|
||||
}
|
||||
}
|
||||
|
||||
// Emits various runtime statsitics
|
||||
func (m *Metrics) emitRuntimeStats() {
|
||||
// Export number of Goroutines
|
||||
numRoutines := runtime.NumGoroutine()
|
||||
m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
|
||||
|
||||
// Export memory stats
|
||||
var stats runtime.MemStats
|
||||
runtime.ReadMemStats(&stats)
|
||||
m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
|
||||
m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
|
||||
m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
|
||||
m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
|
||||
m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
|
||||
m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
|
||||
m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
|
||||
|
||||
// Export info about the last few GC runs
|
||||
num := stats.NumGC
|
||||
|
||||
// Handle wrap around
|
||||
if num < m.lastNumGC {
|
||||
m.lastNumGC = 0
|
||||
}
|
||||
|
||||
// Ensure we don't scan more than 256
|
||||
if num-m.lastNumGC >= 256 {
|
||||
m.lastNumGC = num - 255
|
||||
}
|
||||
|
||||
for i := m.lastNumGC; i < num; i++ {
|
||||
pause := stats.PauseNs[i%256]
|
||||
m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
|
||||
}
|
||||
m.lastNumGC = num
|
||||
}
|
||||
|
||||
// Inserts a string value at an index into the slice
|
||||
func insert(i int, v string, s []string) []string {
|
||||
s = append(s, "")
|
||||
copy(s[i+1:], s[i:])
|
||||
s[i] = v
|
||||
return s
|
||||
}
|
||||
115
vendor/github.com/armon/go-metrics/sink.go
generated
vendored
Normal file
115
vendor/github.com/armon/go-metrics/sink.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// The MetricSink interface is used to transmit metrics information
|
||||
// to an external system
|
||||
type MetricSink interface {
|
||||
// A Gauge should retain the last value it is set to
|
||||
SetGauge(key []string, val float32)
|
||||
SetGaugeWithLabels(key []string, val float32, labels []Label)
|
||||
|
||||
// Should emit a Key/Value pair for each call
|
||||
EmitKey(key []string, val float32)
|
||||
|
||||
// Counters should accumulate values
|
||||
IncrCounter(key []string, val float32)
|
||||
IncrCounterWithLabels(key []string, val float32, labels []Label)
|
||||
|
||||
// Samples are for timing information, where quantiles are used
|
||||
AddSample(key []string, val float32)
|
||||
AddSampleWithLabels(key []string, val float32, labels []Label)
|
||||
}
|
||||
|
||||
// BlackholeSink is used to just blackhole messages
|
||||
type BlackholeSink struct{}
|
||||
|
||||
func (*BlackholeSink) SetGauge(key []string, val float32) {}
|
||||
func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
|
||||
func (*BlackholeSink) EmitKey(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
|
||||
func (*BlackholeSink) AddSample(key []string, val float32) {}
|
||||
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
|
||||
|
||||
// FanoutSink is used to sink to fanout values to multiple sinks
|
||||
type FanoutSink []MetricSink
|
||||
|
||||
func (fh FanoutSink) SetGauge(key []string, val float32) {
|
||||
fh.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.SetGaugeWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) EmitKey(key []string, val float32) {
|
||||
for _, s := range fh {
|
||||
s.EmitKey(key, val)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) IncrCounter(key []string, val float32) {
|
||||
fh.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.IncrCounterWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) AddSample(key []string, val float32) {
|
||||
fh.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.AddSampleWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided
|
||||
// by each sink type
|
||||
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
|
||||
|
||||
// sinkRegistry supports the generic NewMetricSink function by mapping URL
|
||||
// schemes to metric sink factory functions
|
||||
var sinkRegistry = map[string]sinkURLFactoryFunc{
|
||||
"statsd": NewStatsdSinkFromURL,
|
||||
"statsite": NewStatsiteSinkFromURL,
|
||||
"inmem": NewInmemSinkFromURL,
|
||||
}
|
||||
|
||||
// NewMetricSinkFromURL allows a generic URL input to configure any of the
|
||||
// supported sinks. The scheme of the URL identifies the type of the sink, the
|
||||
// and query parameters are used to set options.
|
||||
//
|
||||
// "statsd://" - Initializes a StatsdSink. The host and port are passed through
|
||||
// as the "addr" of the sink
|
||||
//
|
||||
// "statsite://" - Initializes a StatsiteSink. The host and port become the
|
||||
// "addr" of the sink
|
||||
//
|
||||
// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
|
||||
// "interval" and "duration" query parameters must be specified with valid
|
||||
// durations, see NewInmemSink for details.
|
||||
func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sinkURLFactoryFunc := sinkRegistry[u.Scheme]
|
||||
if sinkURLFactoryFunc == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
|
||||
}
|
||||
|
||||
return sinkURLFactoryFunc(u)
|
||||
}
|
||||
141
vendor/github.com/armon/go-metrics/start.go
generated
vendored
Normal file
141
vendor/github.com/armon/go-metrics/start.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
// Config is used to configure metrics settings
|
||||
type Config struct {
|
||||
ServiceName string // Prefixed with keys to separate services
|
||||
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
|
||||
EnableHostname bool // Enable prefixing gauge values with hostname
|
||||
EnableHostnameLabel bool // Enable adding hostname to labels
|
||||
EnableServiceLabel bool // Enable adding service to labels
|
||||
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
|
||||
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
|
||||
TimerGranularity time.Duration // Granularity of timers.
|
||||
ProfileInterval time.Duration // Interval to profile runtime metrics
|
||||
|
||||
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
|
||||
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
|
||||
AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
|
||||
BlockedLabels []string // A list of metric labels to block, with '.' as the separator
|
||||
FilterDefault bool // Whether to allow metrics by default
|
||||
}
|
||||
|
||||
// Metrics represents an instance of a metrics sink that can
|
||||
// be used to emit
|
||||
type Metrics struct {
|
||||
Config
|
||||
lastNumGC uint32
|
||||
sink MetricSink
|
||||
filter *iradix.Tree
|
||||
allowedLabels map[string]bool
|
||||
blockedLabels map[string]bool
|
||||
filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
|
||||
}
|
||||
|
||||
// Shared global metrics instance
|
||||
var globalMetrics atomic.Value // *Metrics
|
||||
|
||||
func init() {
|
||||
// Initialize to a blackhole sink to avoid errors
|
||||
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
|
||||
}
|
||||
|
||||
// DefaultConfig provides a sane default configuration
|
||||
func DefaultConfig(serviceName string) *Config {
|
||||
c := &Config{
|
||||
ServiceName: serviceName, // Use client provided service
|
||||
HostName: "",
|
||||
EnableHostname: true, // Enable hostname prefix
|
||||
EnableRuntimeMetrics: true, // Enable runtime profiling
|
||||
EnableTypePrefix: false, // Disable type prefix
|
||||
TimerGranularity: time.Millisecond, // Timers are in milliseconds
|
||||
ProfileInterval: time.Second, // Poll runtime every second
|
||||
FilterDefault: true, // Don't filter metrics by default
|
||||
}
|
||||
|
||||
// Try to get the hostname
|
||||
name, _ := os.Hostname()
|
||||
c.HostName = name
|
||||
return c
|
||||
}
|
||||
|
||||
// New is used to create a new instance of Metrics
|
||||
func New(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
met := &Metrics{}
|
||||
met.Config = *conf
|
||||
met.sink = sink
|
||||
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
|
||||
|
||||
// Start the runtime collector
|
||||
if conf.EnableRuntimeMetrics {
|
||||
go met.collectStats()
|
||||
}
|
||||
return met, nil
|
||||
}
|
||||
|
||||
// NewGlobal is the same as New, but it assigns the metrics object to be
|
||||
// used globally as well as returning it.
|
||||
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
metrics, err := New(conf, sink)
|
||||
if err == nil {
|
||||
globalMetrics.Store(metrics)
|
||||
}
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
// Proxy all the methods to the globalMetrics instance
|
||||
func SetGauge(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).SetGauge(key, val)
|
||||
}
|
||||
|
||||
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func EmitKey(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).EmitKey(key, val)
|
||||
}
|
||||
|
||||
func IncrCounter(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).IncrCounter(key, val)
|
||||
}
|
||||
|
||||
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func AddSample(key []string, val float32) {
|
||||
globalMetrics.Load().(*Metrics).AddSample(key, val)
|
||||
}
|
||||
|
||||
func AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func MeasureSince(key []string, start time.Time) {
|
||||
globalMetrics.Load().(*Metrics).MeasureSince(key, start)
|
||||
}
|
||||
|
||||
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
|
||||
}
|
||||
|
||||
func UpdateFilter(allow, block []string) {
|
||||
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
|
||||
}
|
||||
|
||||
// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels
|
||||
// and blockedLabels - when not nil - allow filtering of labels in order to
|
||||
// block/allow globally labels (especially useful when having large number of
|
||||
// values for a given label). See README.md for more information about usage.
|
||||
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
|
||||
globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
|
||||
}
|
||||
184
vendor/github.com/armon/go-metrics/statsd.go
generated
vendored
Normal file
184
vendor/github.com/armon/go-metrics/statsd.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// statsdMaxLen is the maximum size of a packet
|
||||
// to send to statsd
|
||||
statsdMaxLen = 1400
|
||||
)
|
||||
|
||||
// StatsdSink provides a MetricSink that can be used
|
||||
// with a statsite or statsd metrics server. It uses
|
||||
// only UDP packets, while StatsiteSink uses TCP.
|
||||
type StatsdSink struct {
|
||||
addr string
|
||||
metricQueue chan string
|
||||
}
|
||||
|
||||
// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
return NewStatsdSink(u.Host)
|
||||
}
|
||||
|
||||
// NewStatsdSink is used to create a new StatsdSink
|
||||
func NewStatsdSink(addr string) (*StatsdSink, error) {
|
||||
s := &StatsdSink{
|
||||
addr: addr,
|
||||
metricQueue: make(chan string, 4096),
|
||||
}
|
||||
go s.flushMetrics()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Close is used to stop flushing to statsd
|
||||
func (s *StatsdSink) Shutdown() {
|
||||
close(s.metricQueue)
|
||||
}
|
||||
|
||||
func (s *StatsdSink) SetGauge(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) EmitKey(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) IncrCounter(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) AddSample(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (s *StatsdSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
return strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case ':':
|
||||
fallthrough
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}, joined)
|
||||
}
|
||||
|
||||
// Flattens the key along with labels for formatting, removes spaces
|
||||
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
|
||||
for _, label := range labels {
|
||||
parts = append(parts, label.Value)
|
||||
}
|
||||
return s.flattenKey(parts)
|
||||
}
|
||||
|
||||
// Does a non-blocking push to the metrics queue
|
||||
func (s *StatsdSink) pushMetric(m string) {
|
||||
select {
|
||||
case s.metricQueue <- m:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Flushes metrics
|
||||
func (s *StatsdSink) flushMetrics() {
|
||||
var sock net.Conn
|
||||
var err error
|
||||
var wait <-chan time.Time
|
||||
ticker := time.NewTicker(flushInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
CONNECT:
|
||||
// Create a buffer
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
// Attempt to connect
|
||||
sock, err = net.Dial("udp", s.addr)
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case metric, ok := <-s.metricQueue:
|
||||
// Get a metric from the queue
|
||||
if !ok {
|
||||
goto QUIT
|
||||
}
|
||||
|
||||
// Check if this would overflow the packet size
|
||||
if len(metric)+buf.Len() > statsdMaxLen {
|
||||
_, err := sock.Write(buf.Bytes())
|
||||
buf.Reset()
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error writing to statsd! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
}
|
||||
|
||||
// Append to the buffer
|
||||
buf.WriteString(metric)
|
||||
|
||||
case <-ticker.C:
|
||||
if buf.Len() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := sock.Write(buf.Bytes())
|
||||
buf.Reset()
|
||||
if err != nil {
|
||||
log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
|
||||
goto WAIT
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WAIT:
|
||||
// Wait for a while
|
||||
wait = time.After(time.Duration(5) * time.Second)
|
||||
for {
|
||||
select {
|
||||
// Dequeue the messages to avoid backlog
|
||||
case _, ok := <-s.metricQueue:
|
||||
if !ok {
|
||||
goto QUIT
|
||||
}
|
||||
case <-wait:
|
||||
goto CONNECT
|
||||
}
|
||||
}
|
||||
QUIT:
|
||||
s.metricQueue = nil
|
||||
}
|
||||
172
vendor/github.com/armon/go-metrics/statsite.go
generated
vendored
Normal file
172
vendor/github.com/armon/go-metrics/statsite.go
generated
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// We force flush the statsite metrics after this period of
|
||||
// inactivity. Prevents stats from getting stuck in a buffer
|
||||
// forever.
|
||||
flushInterval = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
return NewStatsiteSink(u.Host)
|
||||
}
|
||||
|
||||
// StatsiteSink provides a MetricSink that can be used with a
|
||||
// statsite metrics server
|
||||
type StatsiteSink struct {
|
||||
addr string
|
||||
    metricQueue chan string
}

// NewStatsiteSink is used to create a new StatsiteSink
func NewStatsiteSink(addr string) (*StatsiteSink, error) {
    s := &StatsiteSink{
        addr:        addr,
        metricQueue: make(chan string, 4096),
    }
    go s.flushMetrics()
    return s, nil
}

// Shutdown is used to stop flushing to statsite
func (s *StatsiteSink) Shutdown() {
    close(s.metricQueue)
}

func (s *StatsiteSink) SetGauge(key []string, val float32) {
    flatKey := s.flattenKey(key)
    s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}

func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
    flatKey := s.flattenKeyLabels(key, labels)
    s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}

func (s *StatsiteSink) EmitKey(key []string, val float32) {
    flatKey := s.flattenKey(key)
    s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}

func (s *StatsiteSink) IncrCounter(key []string, val float32) {
    flatKey := s.flattenKey(key)
    s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}

func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
    flatKey := s.flattenKeyLabels(key, labels)
    s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}

func (s *StatsiteSink) AddSample(key []string, val float32) {
    flatKey := s.flattenKey(key)
    s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}

func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
    flatKey := s.flattenKeyLabels(key, labels)
    s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}

// Flattens the key for formatting, removes spaces
func (s *StatsiteSink) flattenKey(parts []string) string {
    joined := strings.Join(parts, ".")
    return strings.Map(func(r rune) rune {
        switch r {
        case ':':
            fallthrough
        case ' ':
            return '_'
        default:
            return r
        }
    }, joined)
}

// Flattens the key along with labels for formatting, removes spaces
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
    for _, label := range labels {
        parts = append(parts, label.Value)
    }
    return s.flattenKey(parts)
}

// Does a non-blocking push to the metrics queue
func (s *StatsiteSink) pushMetric(m string) {
    select {
    case s.metricQueue <- m:
    default:
    }
}

// Flushes metrics
func (s *StatsiteSink) flushMetrics() {
    var sock net.Conn
    var err error
    var wait <-chan time.Time
    var buffered *bufio.Writer
    ticker := time.NewTicker(flushInterval)
    defer ticker.Stop()

CONNECT:
    // Attempt to connect
    sock, err = net.Dial("tcp", s.addr)
    if err != nil {
        log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
        goto WAIT
    }

    // Create a buffered writer
    buffered = bufio.NewWriter(sock)

    for {
        select {
        case metric, ok := <-s.metricQueue:
            // Get a metric from the queue
            if !ok {
                goto QUIT
            }

            // Try to send to statsite
            _, err := buffered.Write([]byte(metric))
            if err != nil {
                log.Printf("[ERR] Error writing to statsite! Err: %s", err)
                goto WAIT
            }
        case <-ticker.C:
            if err := buffered.Flush(); err != nil {
                log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
                goto WAIT
            }
        }
    }

WAIT:
    // Wait for a while
    wait = time.After(time.Duration(5) * time.Second)
    for {
        select {
        // Dequeue the messages to avoid backlog
        case _, ok := <-s.metricQueue:
            if !ok {
                goto QUIT
            }
        case <-wait:
            goto CONNECT
        }
    }
QUIT:
    s.metricQueue = nil
}
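For orientation, a minimal sketch of how this sink is typically driven. This assumes the file is the statsite sink from github.com/armon/go-metrics (the import path is an assumption; only the functions defined above are relied on, and the statsite address is hypothetical):

    package main

    import metrics "github.com/armon/go-metrics" // assumed import path for this vendored sink

    func main() {
        sink, err := metrics.NewStatsiteSink("127.0.0.1:8125") // hypothetical statsite address
        if err != nil {
            panic(err)
        }
        defer sink.Shutdown() // closes metricQueue, which stops the flushMetrics goroutine

        // Pushes are non-blocking; the background goroutine batches and flushes them.
        sink.SetGauge([]string{"canary", "weight"}, 10)       // emits "canary.weight:10.000000|g"
        sink.IncrCounter([]string{"canary", "promotions"}, 1) // emits "canary.promotions:1.000000|c"
    }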
21 vendor/github.com/avast/retry-go/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2017 Avast

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
108 vendor/github.com/avast/retry-go/options.go generated vendored Normal file
@@ -0,0 +1,108 @@
package retry

import (
    "time"
)

// RetryIfFunc is the function signature of a retry-if function
type RetryIfFunc func(error) bool

// OnRetryFunc is the function signature of an OnRetry callback;
// n = count of attempts
type OnRetryFunc func(n uint, err error)

type DelayTypeFunc func(n uint, config *config) time.Duration

type config struct {
    attempts      uint
    delay         time.Duration
    onRetry       OnRetryFunc
    retryIf       RetryIfFunc
    delayType     DelayTypeFunc
    lastErrorOnly bool
}

// Option represents an option for retry.
type Option func(*config)

// LastErrorOnly makes Do return only the last error that came from the retried function;
// default is false (return wrapped errors with everything)
func LastErrorOnly(lastErrorOnly bool) Option {
    return func(c *config) {
        c.lastErrorOnly = lastErrorOnly
    }
}

// Attempts sets the retry count;
// default is 10
func Attempts(attempts uint) Option {
    return func(c *config) {
        c.attempts = attempts
    }
}

// Delay sets the delay between retries;
// default is 100ms
func Delay(delay time.Duration) Option {
    return func(c *config) {
        c.delay = delay
    }
}

// DelayType sets the type of delay between retries;
// default is BackOffDelay
func DelayType(delayType DelayTypeFunc) Option {
    return func(c *config) {
        c.delayType = delayType
    }
}

// BackOffDelay is a DelayType which increases delay between consecutive retries
func BackOffDelay(n uint, config *config) time.Duration {
    return config.delay * (1 << (n - 1))
}

// FixedDelay is a DelayType which keeps delay the same through all iterations
func FixedDelay(_ uint, config *config) time.Duration {
    return config.delay
}

// OnRetry sets a callback that is called on each retry
//
// log each retry example:
//
//    retry.Do(
//        func() error {
//            return errors.New("some error")
//        },
//        retry.OnRetry(func(n uint, err error) {
//            log.Printf("#%d: %s\n", n, err)
//        }),
//    )
func OnRetry(onRetry OnRetryFunc) Option {
    return func(c *config) {
        c.onRetry = onRetry
    }
}

// RetryIf controls whether a retry should be attempted after an error
// (assuming there are any retry attempts remaining)
//
// skip retry if special error example:
//
//    retry.Do(
//        func() error {
//            return errors.New("special error")
//        },
//        retry.RetryIf(func(err error) bool {
//            if err.Error() == "special error" {
//                return false
//            }
//            return true
//        })
//    )
func RetryIf(retryIf RetryIfFunc) Option {
    return func(c *config) {
        c.retryIf = retryIf
    }
}
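These options compose through retry.Do (defined in retry.go below). A minimal sketch of applying several of them at once; the failing function is made up for illustration:

    package main

    import (
        "errors"
        "log"
        "time"

        retry "github.com/avast/retry-go"
    )

    func main() {
        err := retry.Do(
            func() error { return errors.New("transient failure") }, // hypothetical flaky call
            retry.Attempts(3),                 // overrides the default of 10
            retry.Delay(50*time.Millisecond),  // base delay
            retry.DelayType(retry.FixedDelay), // constant instead of the default back-off
            retry.OnRetry(func(n uint, err error) {
                log.Printf("attempt #%d failed: %v", n, err)
            }),
        )
        log.Println(err) // aggregated retry.Error listing every attempt
    }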
159 vendor/github.com/avast/retry-go/retry.go generated vendored Normal file
@@ -0,0 +1,159 @@
/*
Simple library for retry mechanism

slightly inspired by [Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry)

SYNOPSIS

http get with retry:

    url := "http://example.com"
    var body []byte

    err := retry.Do(
        func() error {
            resp, err := http.Get(url)
            if err != nil {
                return err
            }
            defer resp.Body.Close()
            body, err = ioutil.ReadAll(resp.Body)
            if err != nil {
                return err
            }

            return nil
        },
    )

    fmt.Println(body)

[next examples](https://github.com/avast/retry-go/tree/master/examples)

SEE ALSO

* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly complicated interface.

* [sethgrid/pester](https://github.com/sethgrid/pester) - HTTP-only retry with retries and backoff

* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the exponential backoff algorithm from Google's HTTP Client Library for Java. Really complicated interface.

* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good, slightly similar to this package, but doesn't have a simple `Retry` method

* [matryer/try](https://github.com/matryer/try) - very popular package, nonintuitive interface (for me)

BREAKING CHANGES

1.0.2 -> 2.0.0

* the argument of `retry.Delay` is the final delay (no multiplication by `retry.Units` anymore)

* the `retry.Units` function was removed

* [more about this breaking change](https://github.com/avast/retry-go/issues/7)

0.3.0 -> 1.0.0

* the `retry.Retry` function was changed to `retry.Do`

* the `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are now implemented via functions producing Options (aka `retry.OnRetry`)

*/
package retry

import (
    "fmt"
    "strings"
    "time"
)

// RetryableFunc is the function signature of a retryable function
type RetryableFunc func() error

func Do(retryableFunc RetryableFunc, opts ...Option) error {
    var n uint

    // default
    config := &config{
        attempts:      10,
        delay:         100 * time.Millisecond,
        onRetry:       func(n uint, err error) {},
        retryIf:       func(err error) bool { return true },
        delayType:     BackOffDelay,
        lastErrorOnly: false,
    }

    // apply opts
    for _, opt := range opts {
        opt(config)
    }

    errorLog := make(Error, config.attempts)

    for n < config.attempts {
        err := retryableFunc()

        if err != nil {
            config.onRetry(n, err)
            errorLog[n] = err

            if !config.retryIf(err) {
                break
            }

            // if this is the last attempt - don't wait
            if n == config.attempts-1 {
                break
            }

            delayTime := config.delayType(n, config)
            time.Sleep(delayTime)
        } else {
            return nil
        }

        n++
    }

    if config.lastErrorOnly {
        return errorLog[n]
    }
    return errorLog
}

// Error type represents list of errors in retry
type Error []error

// Error method returns the string representation of Error
// It is an implementation of the error interface
func (e Error) Error() string {
    logWithNumber := make([]string, lenWithoutNil(e))
    for i, l := range e {
        if l != nil {
            logWithNumber[i] = fmt.Sprintf("#%d: %s", i+1, l.Error())
        }
    }

    return fmt.Sprintf("All attempts fail:\n%s", strings.Join(logWithNumber, "\n"))
}

func lenWithoutNil(e Error) (count int) {
    for _, v := range e {
        if v != nil {
            count++
        }
    }

    return
}

// WrappedErrors returns the list of errors that this Error is wrapping.
// It is an implementation of the `errwrap.Wrapper` interface
// in package [errwrap](https://github.com/hashicorp/errwrap) so that
// `retry.Error` can be used with that library.
func (e Error) WrappedErrors() []error {
    return e
}
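Do has two error-reporting modes, selected by LastErrorOnly above. A short sketch of both (the always-failing function is hypothetical):

    package main

    import (
        "errors"
        "fmt"

        retry "github.com/avast/retry-go"
    )

    func main() {
        fail := func() error { return errors.New("boom") } // hypothetical always-failing call

        // Default: the returned error is a retry.Error aggregating every attempt.
        err := retry.Do(fail, retry.Attempts(2))
        if agg, ok := err.(retry.Error); ok {
            fmt.Println(len(agg.WrappedErrors())) // one slot per attempt: 2
        }

        // With LastErrorOnly(true), only the final attempt's error comes back.
        err = retry.Do(fail, retry.Attempts(2), retry.LastErrorOnly(true))
        fmt.Println(err) // "boom"
    }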
25 vendor/github.com/evanphx/json-patch/LICENSE generated vendored Normal file
@@ -0,0 +1,25 @@
Copyright (c) 2014, Evan Phoenix
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
  may be used to endorse or promote products derived from this software
  without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
383 vendor/github.com/evanphx/json-patch/merge.go generated vendored Normal file
@@ -0,0 +1,383 @@
package jsonpatch

import (
    "bytes"
    "encoding/json"
    "fmt"
    "reflect"
)

func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
    curDoc, err := cur.intoDoc()

    if err != nil {
        pruneNulls(patch)
        return patch
    }

    patchDoc, err := patch.intoDoc()

    if err != nil {
        return patch
    }

    mergeDocs(curDoc, patchDoc, mergeMerge)

    return cur
}

func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
    for k, v := range *patch {
        if v == nil {
            if mergeMerge {
                (*doc)[k] = nil
            } else {
                delete(*doc, k)
            }
        } else {
            cur, ok := (*doc)[k]

            if !ok || cur == nil {
                pruneNulls(v)
                (*doc)[k] = v
            } else {
                (*doc)[k] = merge(cur, v, mergeMerge)
            }
        }
    }
}

func pruneNulls(n *lazyNode) {
    sub, err := n.intoDoc()

    if err == nil {
        pruneDocNulls(sub)
    } else {
        ary, err := n.intoAry()

        if err == nil {
            pruneAryNulls(ary)
        }
    }
}

func pruneDocNulls(doc *partialDoc) *partialDoc {
    for k, v := range *doc {
        if v == nil {
            delete(*doc, k)
        } else {
            pruneNulls(v)
        }
    }

    return doc
}

func pruneAryNulls(ary *partialArray) *partialArray {
    newAry := []*lazyNode{}

    for _, v := range *ary {
        if v != nil {
            pruneNulls(v)
            newAry = append(newAry, v)
        }
    }

    *ary = newAry

    return ary
}

var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")

// MergeMergePatches merges two merge patches together, such that
// applying this resulting merged merge patch to a document yields the same
// as merging each merge patch to the document in succession.
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
    return doMergePatch(patch1Data, patch2Data, true)
}

// MergePatch merges the patchData into the docData.
func MergePatch(docData, patchData []byte) ([]byte, error) {
    return doMergePatch(docData, patchData, false)
}

func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
    doc := &partialDoc{}

    docErr := json.Unmarshal(docData, doc)

    patch := &partialDoc{}

    patchErr := json.Unmarshal(patchData, patch)

    if _, ok := docErr.(*json.SyntaxError); ok {
        return nil, errBadJSONDoc
    }

    if _, ok := patchErr.(*json.SyntaxError); ok {
        return nil, errBadJSONPatch
    }

    if docErr == nil && *doc == nil {
        return nil, errBadJSONDoc
    }

    if patchErr == nil && *patch == nil {
        return nil, errBadJSONPatch
    }

    if docErr != nil || patchErr != nil {
        // Not an error, just not a doc, so we turn straight into the patch
        if patchErr == nil {
            if mergeMerge {
                doc = patch
            } else {
                doc = pruneDocNulls(patch)
            }
        } else {
            patchAry := &partialArray{}
            patchErr = json.Unmarshal(patchData, patchAry)

            if patchErr != nil {
                return nil, errBadJSONPatch
            }

            pruneAryNulls(patchAry)

            out, patchErr := json.Marshal(patchAry)

            if patchErr != nil {
                return nil, errBadJSONPatch
            }

            return out, nil
        }
    } else {
        mergeDocs(doc, patch, mergeMerge)
    }

    return json.Marshal(doc)
}

// resemblesJSONArray indicates whether the byte-slice "appears" to be
// a JSON array or not.
// False-positives are possible, as this function does not check the internal
// structure of the array. It only checks that the outer syntax is present and
// correct.
func resemblesJSONArray(input []byte) bool {
    input = bytes.TrimSpace(input)

    hasPrefix := bytes.HasPrefix(input, []byte("["))
    hasSuffix := bytes.HasSuffix(input, []byte("]"))

    return hasPrefix && hasSuffix
}

// CreateMergePatch will return a merge patch document capable of converting
// the original document(s) to the modified document(s).
// The parameters can be bytes of either two JSON Documents, or two arrays of
// JSON documents.
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
    originalResemblesArray := resemblesJSONArray(originalJSON)
    modifiedResemblesArray := resemblesJSONArray(modifiedJSON)

    // Do both byte-slices seem like JSON arrays?
    if originalResemblesArray && modifiedResemblesArray {
        return createArrayMergePatch(originalJSON, modifiedJSON)
    }

    // Are both byte-slices not arrays? Then they are likely JSON objects...
    if !originalResemblesArray && !modifiedResemblesArray {
        return createObjectMergePatch(originalJSON, modifiedJSON)
    }

    // None of the above? Then return an error because of mismatched types.
    return nil, errBadMergeTypes
}

// createObjectMergePatch will return a merge-patch document capable of
// converting the original document to the modified document.
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
    originalDoc := map[string]interface{}{}
    modifiedDoc := map[string]interface{}{}

    err := json.Unmarshal(originalJSON, &originalDoc)
    if err != nil {
        return nil, errBadJSONDoc
    }

    err = json.Unmarshal(modifiedJSON, &modifiedDoc)
    if err != nil {
        return nil, errBadJSONDoc
    }

    dest, err := getDiff(originalDoc, modifiedDoc)
    if err != nil {
        return nil, err
    }

    return json.Marshal(dest)
}

// createArrayMergePatch will return an array of merge-patch documents capable
// of converting the original document to the modified document for each
// pair of JSON documents provided in the arrays.
// Arrays of mismatched sizes will result in an error.
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
    originalDocs := []json.RawMessage{}
    modifiedDocs := []json.RawMessage{}

    err := json.Unmarshal(originalJSON, &originalDocs)
    if err != nil {
        return nil, errBadJSONDoc
    }

    err = json.Unmarshal(modifiedJSON, &modifiedDocs)
    if err != nil {
        return nil, errBadJSONDoc
    }

    total := len(originalDocs)
    if len(modifiedDocs) != total {
        return nil, errBadJSONDoc
    }

    result := []json.RawMessage{}
    for i := 0; i < len(originalDocs); i++ {
        original := originalDocs[i]
        modified := modifiedDocs[i]

        patch, err := createObjectMergePatch(original, modified)
        if err != nil {
            return nil, err
        }

        result = append(result, json.RawMessage(patch))
    }

    return json.Marshal(result)
}

// Returns true if the array matches (must be json types).
// As is idiomatic for go, an empty array is not the same as a nil array.
func matchesArray(a, b []interface{}) bool {
    if len(a) != len(b) {
        return false
    }
    if (a == nil && b != nil) || (a != nil && b == nil) {
        return false
    }
    for i := range a {
        if !matchesValue(a[i], b[i]) {
            return false
        }
    }
    return true
}

// Returns true if the values match (must be json types)
// The types of the values must match, otherwise it will always return false
// If two map[string]interface{} are given, all elements must match.
func matchesValue(av, bv interface{}) bool {
    if reflect.TypeOf(av) != reflect.TypeOf(bv) {
        return false
    }
    switch at := av.(type) {
    case string:
        bt := bv.(string)
        if bt == at {
            return true
        }
    case float64:
        bt := bv.(float64)
        if bt == at {
            return true
        }
    case bool:
        bt := bv.(bool)
        if bt == at {
            return true
        }
    case nil:
        // Both nil, fine.
        return true
    case map[string]interface{}:
        bt := bv.(map[string]interface{})
        for key := range at {
            if !matchesValue(at[key], bt[key]) {
                return false
            }
        }
        for key := range bt {
            if !matchesValue(at[key], bt[key]) {
                return false
            }
        }
        return true
    case []interface{}:
        bt := bv.([]interface{})
        return matchesArray(at, bt)
    }
    return false
}

// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
    into := map[string]interface{}{}
    for key, bv := range b {
        av, ok := a[key]
        // value was added
        if !ok {
            into[key] = bv
            continue
        }
        // If types have changed, replace completely
        if reflect.TypeOf(av) != reflect.TypeOf(bv) {
            into[key] = bv
            continue
        }
        // Types are the same, compare values
        switch at := av.(type) {
        case map[string]interface{}:
            bt := bv.(map[string]interface{})
            dst := make(map[string]interface{}, len(bt))
            dst, err := getDiff(at, bt)
            if err != nil {
                return nil, err
            }
            if len(dst) > 0 {
                into[key] = dst
            }
        case string, float64, bool:
            if !matchesValue(av, bv) {
                into[key] = bv
            }
        case []interface{}:
            bt := bv.([]interface{})
            if !matchesArray(at, bt) {
                into[key] = bv
            }
        case nil:
            switch bv.(type) {
            case nil:
                // Both nil, fine.
            default:
                into[key] = bv
            }
        default:
            panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
        }
    }
    // Now add all deleted values as nil
    for key := range a {
        _, found := b[key]
        if !found {
            into[key] = nil
        }
    }
    return into, nil
}
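To make the merge-patch flow above concrete, a small sketch using the exported entry point MergePatch; the document and patch bytes are made up for illustration:

    package main

    import (
        "fmt"

        jsonpatch "github.com/evanphx/json-patch"
    )

    func main() {
        doc := []byte(`{"name":"podinfo","replicas":1,"canary":true}`)
        // Merge-patch semantics (per the draft cited above): null deletes a key,
        // any other value replaces it, and absent keys are left alone.
        patch := []byte(`{"replicas":2,"canary":null}`)

        merged, err := jsonpatch.MergePatch(doc, patch)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(merged)) // {"name":"podinfo","replicas":2}
    }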
682 vendor/github.com/evanphx/json-patch/patch.go generated vendored Normal file
@@ -0,0 +1,682 @@
package jsonpatch

import (
    "bytes"
    "encoding/json"
    "fmt"
    "strconv"
    "strings"
)

const (
    eRaw = iota
    eDoc
    eAry
)

var SupportNegativeIndices bool = true

type lazyNode struct {
    raw   *json.RawMessage
    doc   partialDoc
    ary   partialArray
    which int
}

type operation map[string]*json.RawMessage

// Patch is an ordered collection of operations.
type Patch []operation

type partialDoc map[string]*lazyNode
type partialArray []*lazyNode

type container interface {
    get(key string) (*lazyNode, error)
    set(key string, val *lazyNode) error
    add(key string, val *lazyNode) error
    remove(key string) error
}

func newLazyNode(raw *json.RawMessage) *lazyNode {
    return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
}

func (n *lazyNode) MarshalJSON() ([]byte, error) {
    switch n.which {
    case eRaw:
        return json.Marshal(n.raw)
    case eDoc:
        return json.Marshal(n.doc)
    case eAry:
        return json.Marshal(n.ary)
    default:
        return nil, fmt.Errorf("Unknown type")
    }
}

func (n *lazyNode) UnmarshalJSON(data []byte) error {
    dest := make(json.RawMessage, len(data))
    copy(dest, data)
    n.raw = &dest
    n.which = eRaw
    return nil
}

func (n *lazyNode) intoDoc() (*partialDoc, error) {
    if n.which == eDoc {
        return &n.doc, nil
    }

    if n.raw == nil {
        return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
    }

    err := json.Unmarshal(*n.raw, &n.doc)

    if err != nil {
        return nil, err
    }

    n.which = eDoc
    return &n.doc, nil
}

func (n *lazyNode) intoAry() (*partialArray, error) {
    if n.which == eAry {
        return &n.ary, nil
    }

    if n.raw == nil {
        return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
    }

    err := json.Unmarshal(*n.raw, &n.ary)

    if err != nil {
        return nil, err
    }

    n.which = eAry
    return &n.ary, nil
}

func (n *lazyNode) compact() []byte {
    buf := &bytes.Buffer{}

    if n.raw == nil {
        return nil
    }

    err := json.Compact(buf, *n.raw)

    if err != nil {
        return *n.raw
    }

    return buf.Bytes()
}

func (n *lazyNode) tryDoc() bool {
    if n.raw == nil {
        return false
    }

    err := json.Unmarshal(*n.raw, &n.doc)

    if err != nil {
        return false
    }

    n.which = eDoc
    return true
}

func (n *lazyNode) tryAry() bool {
    if n.raw == nil {
        return false
    }

    err := json.Unmarshal(*n.raw, &n.ary)

    if err != nil {
        return false
    }

    n.which = eAry
    return true
}

func (n *lazyNode) equal(o *lazyNode) bool {
    if n.which == eRaw {
        if !n.tryDoc() && !n.tryAry() {
            if o.which != eRaw {
                return false
            }

            return bytes.Equal(n.compact(), o.compact())
        }
    }

    if n.which == eDoc {
        if o.which == eRaw {
            if !o.tryDoc() {
                return false
            }
        }

        if o.which != eDoc {
            return false
        }

        for k, v := range n.doc {
            ov, ok := o.doc[k]

            if !ok {
                return false
            }

            if v == nil && ov == nil {
                continue
            }

            if !v.equal(ov) {
                return false
            }
        }

        return true
    }

    if o.which != eAry && !o.tryAry() {
        return false
    }

    if len(n.ary) != len(o.ary) {
        return false
    }

    for idx, val := range n.ary {
        if !val.equal(o.ary[idx]) {
            return false
        }
    }

    return true
}

func (o operation) kind() string {
    if obj, ok := o["op"]; ok && obj != nil {
        var op string

        err := json.Unmarshal(*obj, &op)

        if err != nil {
            return "unknown"
        }

        return op
    }

    return "unknown"
}

func (o operation) path() string {
    if obj, ok := o["path"]; ok && obj != nil {
        var op string

        err := json.Unmarshal(*obj, &op)

        if err != nil {
            return "unknown"
        }

        return op
    }

    return "unknown"
}

func (o operation) from() string {
    if obj, ok := o["from"]; ok && obj != nil {
        var op string

        err := json.Unmarshal(*obj, &op)

        if err != nil {
            return "unknown"
        }

        return op
    }

    return "unknown"
}

func (o operation) value() *lazyNode {
    if obj, ok := o["value"]; ok {
        return newLazyNode(obj)
    }

    return nil
}

func isArray(buf []byte) bool {
Loop:
    for _, c := range buf {
        switch c {
        case ' ':
        case '\n':
        case '\t':
            continue
        case '[':
            return true
        default:
            break Loop
        }
    }

    return false
}

func findObject(pd *container, path string) (container, string) {
    doc := *pd

    split := strings.Split(path, "/")

    if len(split) < 2 {
        return nil, ""
    }

    parts := split[1 : len(split)-1]

    key := split[len(split)-1]

    var err error

    for _, part := range parts {

        next, ok := doc.get(decodePatchKey(part))

        if next == nil || ok != nil {
            return nil, ""
        }

        if isArray(*next.raw) {
            doc, err = next.intoAry()

            if err != nil {
                return nil, ""
            }
        } else {
            doc, err = next.intoDoc()

            if err != nil {
                return nil, ""
            }
        }
    }

    return doc, decodePatchKey(key)
}

func (d *partialDoc) set(key string, val *lazyNode) error {
    (*d)[key] = val
    return nil
}

func (d *partialDoc) add(key string, val *lazyNode) error {
    (*d)[key] = val
    return nil
}

func (d *partialDoc) get(key string) (*lazyNode, error) {
    return (*d)[key], nil
}

func (d *partialDoc) remove(key string) error {
    _, ok := (*d)[key]
    if !ok {
        return fmt.Errorf("Unable to remove nonexistent key: %s", key)
    }

    delete(*d, key)
    return nil
}

func (d *partialArray) set(key string, val *lazyNode) error {
    if key == "-" {
        *d = append(*d, val)
        return nil
    }

    idx, err := strconv.Atoi(key)
    if err != nil {
        return err
    }

    sz := len(*d)
    if idx+1 > sz {
        sz = idx + 1
    }

    ary := make([]*lazyNode, sz)

    cur := *d

    copy(ary, cur)

    if idx >= len(ary) {
        return fmt.Errorf("Unable to access invalid index: %d", idx)
    }

    ary[idx] = val

    *d = ary
    return nil
}

func (d *partialArray) add(key string, val *lazyNode) error {
    if key == "-" {
        *d = append(*d, val)
        return nil
    }

    idx, err := strconv.Atoi(key)
    if err != nil {
        return err
    }

    ary := make([]*lazyNode, len(*d)+1)

    cur := *d

    if idx >= len(ary) {
        return fmt.Errorf("Unable to access invalid index: %d", idx)
    }

    if SupportNegativeIndices {
        if idx < -len(ary) {
            return fmt.Errorf("Unable to access invalid index: %d", idx)
        }

        if idx < 0 {
            idx += len(ary)
        }
    }

    copy(ary[0:idx], cur[0:idx])
    ary[idx] = val
    copy(ary[idx+1:], cur[idx:])

    *d = ary
    return nil
}

func (d *partialArray) get(key string) (*lazyNode, error) {
    idx, err := strconv.Atoi(key)

    if err != nil {
        return nil, err
    }

    if idx >= len(*d) {
        return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
    }

    return (*d)[idx], nil
}

func (d *partialArray) remove(key string) error {
    idx, err := strconv.Atoi(key)
    if err != nil {
        return err
    }

    cur := *d

    if idx >= len(cur) {
        return fmt.Errorf("Unable to access invalid index: %d", idx)
    }

    if SupportNegativeIndices {
        if idx < -len(cur) {
            return fmt.Errorf("Unable to access invalid index: %d", idx)
        }

        if idx < 0 {
            idx += len(cur)
        }
    }

    ary := make([]*lazyNode, len(cur)-1)

    copy(ary[0:idx], cur[0:idx])
    copy(ary[idx:], cur[idx+1:])

    *d = ary
    return nil
}

func (p Patch) add(doc *container, op operation) error {
    path := op.path()

    con, key := findObject(doc, path)

    if con == nil {
        return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
    }

    return con.add(key, op.value())
}

func (p Patch) remove(doc *container, op operation) error {
    path := op.path()

    con, key := findObject(doc, path)

    if con == nil {
        return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
    }

    return con.remove(key)
}

func (p Patch) replace(doc *container, op operation) error {
    path := op.path()

    con, key := findObject(doc, path)

    if con == nil {
        return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
    }

    _, ok := con.get(key)
    if ok != nil {
        return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
    }

    return con.set(key, op.value())
}

func (p Patch) move(doc *container, op operation) error {
    from := op.from()

    con, key := findObject(doc, from)

    if con == nil {
        return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
    }

    val, err := con.get(key)
    if err != nil {
        return err
    }

    err = con.remove(key)
    if err != nil {
        return err
    }

    path := op.path()

    con, key = findObject(doc, path)

    if con == nil {
        return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
    }

    return con.set(key, val)
}

func (p Patch) test(doc *container, op operation) error {
    path := op.path()

    con, key := findObject(doc, path)

    if con == nil {
        return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
    }

    val, err := con.get(key)

    if err != nil {
        return err
    }

    if val == nil {
        if op.value().raw == nil {
            return nil
        }
        return fmt.Errorf("Testing value %s failed", path)
    } else if op.value() == nil {
        return fmt.Errorf("Testing value %s failed", path)
    }

    if val.equal(op.value()) {
        return nil
    }

    return fmt.Errorf("Testing value %s failed", path)
}

func (p Patch) copy(doc *container, op operation) error {
    from := op.from()

    con, key := findObject(doc, from)

    if con == nil {
        return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
    }

    val, err := con.get(key)
    if err != nil {
        return err
    }

    path := op.path()

    con, key = findObject(doc, path)

    if con == nil {
        return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
    }

    return con.set(key, val)
}

// Equal indicates whether two JSON documents are structurally equal.
func Equal(a, b []byte) bool {
    ra := make(json.RawMessage, len(a))
    copy(ra, a)
    la := newLazyNode(&ra)

    rb := make(json.RawMessage, len(b))
    copy(rb, b)
    lb := newLazyNode(&rb)

    return la.equal(lb)
}

// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
    var p Patch

    err := json.Unmarshal(buf, &p)

    if err != nil {
        return nil, err
    }

    return p, nil
}

// Apply mutates a JSON document according to the patch, and returns the new
// document.
func (p Patch) Apply(doc []byte) ([]byte, error) {
    return p.ApplyIndent(doc, "")
}

// ApplyIndent mutates a JSON document according to the patch, and returns the new
// document indented.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
    var pd container
    if doc[0] == '[' {
        pd = &partialArray{}
    } else {
        pd = &partialDoc{}
    }

    err := json.Unmarshal(doc, pd)

    if err != nil {
        return nil, err
    }

    err = nil

    for _, op := range p {
        switch op.kind() {
        case "add":
            err = p.add(&pd, op)
        case "remove":
            err = p.remove(&pd, op)
        case "replace":
            err = p.replace(&pd, op)
        case "move":
            err = p.move(&pd, op)
        case "test":
            err = p.test(&pd, op)
        case "copy":
            err = p.copy(&pd, op)
        default:
            err = fmt.Errorf("Unexpected kind: %s", op.kind())
        }

        if err != nil {
            return nil, err
        }
    }

    if indent != "" {
        return json.MarshalIndent(pd, "", indent)
    }

    return json.Marshal(pd)
}

// From http://tools.ietf.org/html/rfc6901#section-4 :
//
// Evaluation of each reference token begins by decoding any escaped
// character sequence. This is performed by first transforming any
// occurrence of the sequence '~1' to '/', and then transforming any
// occurrence of the sequence '~0' to '~'.

var (
    rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
)

func decodePatchKey(k string) string {
    return rfc6901Decoder.Replace(k)
}
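A brief sketch of the decode-and-apply cycle these types implement: DecodePatch parses an RFC 6902 operation list, and Patch.Apply dispatches each operation via op.kind() as shown above. The document and patch bytes are illustrative:

    package main

    import (
        "fmt"

        jsonpatch "github.com/evanphx/json-patch"
    )

    func main() {
        doc := []byte(`{"spec":{"replicas":1}}`)
        // One RFC 6902 operation; the "/spec/replicas" path is resolved by findObject.
        ops := []byte(`[{"op":"replace","path":"/spec/replicas","value":3}]`)

        patch, err := jsonpatch.DecodePatch(ops)
        if err != nil {
            panic(err)
        }

        out, err := patch.Apply(doc)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"spec":{"replicas":3}}
    }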
169 vendor/github.com/gogo/protobuf/gogoproto/doc.go generated vendored Normal file
@@ -0,0 +1,169 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package gogoproto provides extensions for protocol buffers to achieve:

  - fast marshalling and unmarshalling.
  - peace of mind by optionally generating test and benchmark code.
  - more canonical Go structures.
  - less typing by optionally generating extra helper code.
  - goprotobuf compatibility

More Canonical Go Structures

A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.

  - nullable, if false, a field is generated without a pointer (see warning below).
  - embed, if true, the field is generated as an embedded field.
  - customtype, works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128
  - customname (beta), changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
  - casttype (beta), changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
  - castkey (beta), changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
  - castvalue (beta), changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.

Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.

Let us look at:

    github.com/gogo/protobuf/test/example/example.proto

for a quicker overview.

The following message:

    package test;

    import "github.com/gogo/protobuf/gogoproto/gogo.proto";

    message A {
        optional string Description = 1 [(gogoproto.nullable) = false];
        optional int64 Number = 2 [(gogoproto.nullable) = false];
        optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
    }

Will generate a go struct which looks a lot like this:

    type A struct {
        Description string
        Number      int64
        Id          github_com_gogo_protobuf_test_custom.Uuid
    }

You will see there are no pointers, since all fields are non-nullable.
You will also see a custom type which marshals to a string.
Be warned it is your responsibility to test your custom types thoroughly.
You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.

Next we will embed the message A in message B.

    message B {
        optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
        repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
    }

See below that A is embedded in B.

    type B struct {
        A
        G []github_com_gogo_protobuf_test_custom.Uint128
    }

Also see the repeated custom type.

    type Uint128 [2]uint64

Next we will create a custom name for one of our fields.

    message C {
        optional int64 size = 1 [(gogoproto.customname) = "MySize"];
    }

See below that the field's name is MySize and not Size.

    type C struct {
        MySize *int64
    }

This is useful when having a protocol buffer message with a field name which conflicts with a generated method.
As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
Using customname you can fix this error without changing the field name.
This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.

Gogoprotobuf also has some more subtle changes, these could be changed back:

  - the generated package name for imports does not have the extra /filename.pb,
    but is actually the import specified in the .proto file.

Gogoprotobuf also has lost some features which should be brought back with time:

  - Marshalling and unmarshalling with reflect and without the unsafe package,
    this requires work in pointer_reflect.go

Why does nullable break protocol buffer specifications:

The protocol buffer specification states, somewhere, that you should be able to tell whether a
field is set or unset. With the option nullable=false this feature is lost,
since your non-nullable fields will always be set. It can be seen as a layer on top of
protocol buffers, where before and after marshalling all non-nullable fields are set
and they cannot be unset.

Goprotobuf Compatibility:

Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:

  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method.
  - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face
  - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method.
  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
  - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).

Less Typing and Peace of Mind is explained in their specific plugin folders godoc:

  - github.com/gogo/protobuf/plugin/<extension_name>

If you do not use any of these extensions, the code that is generated
will be the same as if goprotobuf had generated it.

The most complete way to see examples is to look at

    github.com/gogo/protobuf/test/thetest.proto

Gogoprototest is a separate project,
because we want to keep gogoprotobuf independent of goprotobuf,
but we still want to test it thoroughly.

*/
package gogoproto
874 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go generated vendored Normal file
@@ -0,0 +1,874 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogo.proto

package gogoproto

import (
    fmt "fmt"
    proto "github.com/gogo/protobuf/proto"
    descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.EnumOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         62001,
    Name:          "gogoproto.goproto_enum_prefix",
    Tag:           "varint,62001,opt,name=goproto_enum_prefix",
    Filename:      "gogo.proto",
}

var E_GoprotoEnumStringer = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.EnumOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         62021,
    Name:          "gogoproto.goproto_enum_stringer",
    Tag:           "varint,62021,opt,name=goproto_enum_stringer",
    Filename:      "gogo.proto",
}

var E_EnumStringer = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.EnumOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         62022,
    Name:          "gogoproto.enum_stringer",
    Tag:           "varint,62022,opt,name=enum_stringer",
    Filename:      "gogo.proto",
}

var E_EnumCustomname = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.EnumOptions)(nil),
    ExtensionType: (*string)(nil),
    Field:         62023,
    Name:          "gogoproto.enum_customname",
    Tag:           "bytes,62023,opt,name=enum_customname",
    Filename:      "gogo.proto",
}

var E_Enumdecl = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.EnumOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         62024,
    Name:          "gogoproto.enumdecl",
    Tag:           "varint,62024,opt,name=enumdecl",
    Filename:      "gogo.proto",
}

var E_EnumvalueCustomname = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.EnumValueOptions)(nil),
    ExtensionType: (*string)(nil),
    Field:         66001,
    Name:          "gogoproto.enumvalue_customname",
    Tag:           "bytes,66001,opt,name=enumvalue_customname",
    Filename:      "gogo.proto",
}

var E_GoprotoGettersAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63001,
    Name:          "gogoproto.goproto_getters_all",
    Tag:           "varint,63001,opt,name=goproto_getters_all",
    Filename:      "gogo.proto",
}

var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63002,
    Name:          "gogoproto.goproto_enum_prefix_all",
    Tag:           "varint,63002,opt,name=goproto_enum_prefix_all",
    Filename:      "gogo.proto",
}

var E_GoprotoStringerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63003,
    Name:          "gogoproto.goproto_stringer_all",
    Tag:           "varint,63003,opt,name=goproto_stringer_all",
    Filename:      "gogo.proto",
}

var E_VerboseEqualAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63004,
    Name:          "gogoproto.verbose_equal_all",
    Tag:           "varint,63004,opt,name=verbose_equal_all",
    Filename:      "gogo.proto",
}

var E_FaceAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63005,
    Name:          "gogoproto.face_all",
    Tag:           "varint,63005,opt,name=face_all",
    Filename:      "gogo.proto",
}

var E_GostringAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63006,
    Name:          "gogoproto.gostring_all",
    Tag:           "varint,63006,opt,name=gostring_all",
    Filename:      "gogo.proto",
}

var E_PopulateAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63007,
    Name:          "gogoproto.populate_all",
    Tag:           "varint,63007,opt,name=populate_all",
    Filename:      "gogo.proto",
}

var E_StringerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63008,
    Name:          "gogoproto.stringer_all",
    Tag:           "varint,63008,opt,name=stringer_all",
    Filename:      "gogo.proto",
}

var E_OnlyoneAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63009,
    Name:          "gogoproto.onlyone_all",
    Tag:           "varint,63009,opt,name=onlyone_all",
    Filename:      "gogo.proto",
}

var E_EqualAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63013,
    Name:          "gogoproto.equal_all",
    Tag:           "varint,63013,opt,name=equal_all",
    Filename:      "gogo.proto",
}

var E_DescriptionAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63014,
    Name:          "gogoproto.description_all",
    Tag:           "varint,63014,opt,name=description_all",
    Filename:      "gogo.proto",
}

var E_TestgenAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63015,
    Name:          "gogoproto.testgen_all",
    Tag:           "varint,63015,opt,name=testgen_all",
    Filename:      "gogo.proto",
}

var E_BenchgenAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63016,
    Name:          "gogoproto.benchgen_all",
    Tag:           "varint,63016,opt,name=benchgen_all",
    Filename:      "gogo.proto",
}

var E_MarshalerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63017,
    Name:          "gogoproto.marshaler_all",
    Tag:           "varint,63017,opt,name=marshaler_all",
    Filename:      "gogo.proto",
}

var E_UnmarshalerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63018,
    Name:          "gogoproto.unmarshaler_all",
    Tag:           "varint,63018,opt,name=unmarshaler_all",
    Filename:      "gogo.proto",
}

var E_StableMarshalerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63019,
    Name:          "gogoproto.stable_marshaler_all",
    Tag:           "varint,63019,opt,name=stable_marshaler_all",
    Filename:      "gogo.proto",
}

var E_SizerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63020,
    Name:          "gogoproto.sizer_all",
    Tag:           "varint,63020,opt,name=sizer_all",
    Filename:      "gogo.proto",
}

var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63021,
    Name:          "gogoproto.goproto_enum_stringer_all",
    Tag:           "varint,63021,opt,name=goproto_enum_stringer_all",
    Filename:      "gogo.proto",
}

var E_EnumStringerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63022,
    Name:          "gogoproto.enum_stringer_all",
    Tag:           "varint,63022,opt,name=enum_stringer_all",
    Filename:      "gogo.proto",
}

var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63023,
    Name:          "gogoproto.unsafe_marshaler_all",
    Tag:           "varint,63023,opt,name=unsafe_marshaler_all",
    Filename:      "gogo.proto",
}

var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63024,
    Name:          "gogoproto.unsafe_unmarshaler_all",
    Tag:           "varint,63024,opt,name=unsafe_unmarshaler_all",
    Filename:      "gogo.proto",
}

var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63025,
    Name:          "gogoproto.goproto_extensions_map_all",
    Tag:           "varint,63025,opt,name=goproto_extensions_map_all",
    Filename:      "gogo.proto",
}

var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63026,
    Name:          "gogoproto.goproto_unrecognized_all",
    Tag:           "varint,63026,opt,name=goproto_unrecognized_all",
    Filename:      "gogo.proto",
}

var E_GogoprotoImport = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63027,
    Name:          "gogoproto.gogoproto_import",
    Tag:           "varint,63027,opt,name=gogoproto_import",
    Filename:      "gogo.proto",
}

var E_ProtosizerAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63028,
    Name:          "gogoproto.protosizer_all",
    Tag:           "varint,63028,opt,name=protosizer_all",
    Filename:      "gogo.proto",
}

var E_CompareAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63029,
    Name:          "gogoproto.compare_all",
    Tag:           "varint,63029,opt,name=compare_all",
    Filename:      "gogo.proto",
}

var E_TypedeclAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63030,
    Name:          "gogoproto.typedecl_all",
    Tag:           "varint,63030,opt,name=typedecl_all",
    Filename:      "gogo.proto",
}

var E_EnumdeclAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63031,
    Name:          "gogoproto.enumdecl_all",
    Tag:           "varint,63031,opt,name=enumdecl_all",
    Filename:      "gogo.proto",
}

var E_GoprotoRegistration = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63032,
    Name:          "gogoproto.goproto_registration",
    Tag:           "varint,63032,opt,name=goproto_registration",
    Filename:      "gogo.proto",
}

var E_MessagenameAll = &proto.ExtensionDesc{
    ExtendedType:  (*descriptor.FileOptions)(nil),
    ExtensionType: (*bool)(nil),
    Field:         63033,
    Name:          "gogoproto.messagename_all",
    Tag:           "varint,63033,opt,name=messagename_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoSizecacheAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63034,
|
||||
Name: "gogoproto.goproto_sizecache_all",
|
||||
Tag: "varint,63034,opt,name=goproto_sizecache_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63035,
|
||||
Name: "gogoproto.goproto_unkeyed_all",
|
||||
Tag: "varint,63035,opt,name=goproto_unkeyed_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoGetters = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64001,
|
||||
Name: "gogoproto.goproto_getters",
|
||||
Tag: "varint,64001,opt,name=goproto_getters",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoStringer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64003,
|
||||
Name: "gogoproto.goproto_stringer",
|
||||
Tag: "varint,64003,opt,name=goproto_stringer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_VerboseEqual = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64004,
|
||||
Name: "gogoproto.verbose_equal",
|
||||
Tag: "varint,64004,opt,name=verbose_equal",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Face = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64005,
|
||||
Name: "gogoproto.face",
|
||||
Tag: "varint,64005,opt,name=face",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Gostring = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64006,
|
||||
Name: "gogoproto.gostring",
|
||||
Tag: "varint,64006,opt,name=gostring",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Populate = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64007,
|
||||
Name: "gogoproto.populate",
|
||||
Tag: "varint,64007,opt,name=populate",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Stringer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 67008,
|
||||
Name: "gogoproto.stringer",
|
||||
Tag: "varint,67008,opt,name=stringer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Onlyone = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64009,
|
||||
Name: "gogoproto.onlyone",
|
||||
Tag: "varint,64009,opt,name=onlyone",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Equal = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64013,
|
||||
Name: "gogoproto.equal",
|
||||
Tag: "varint,64013,opt,name=equal",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Description = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64014,
|
||||
Name: "gogoproto.description",
|
||||
Tag: "varint,64014,opt,name=description",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Testgen = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64015,
|
||||
Name: "gogoproto.testgen",
|
||||
Tag: "varint,64015,opt,name=testgen",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Benchgen = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64016,
|
||||
Name: "gogoproto.benchgen",
|
||||
Tag: "varint,64016,opt,name=benchgen",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Marshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64017,
|
||||
Name: "gogoproto.marshaler",
|
||||
Tag: "varint,64017,opt,name=marshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Unmarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64018,
|
||||
Name: "gogoproto.unmarshaler",
|
||||
Tag: "varint,64018,opt,name=unmarshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_StableMarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64019,
|
||||
Name: "gogoproto.stable_marshaler",
|
||||
Tag: "varint,64019,opt,name=stable_marshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Sizer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64020,
|
||||
Name: "gogoproto.sizer",
|
||||
Tag: "varint,64020,opt,name=sizer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnsafeMarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64023,
|
||||
Name: "gogoproto.unsafe_marshaler",
|
||||
Tag: "varint,64023,opt,name=unsafe_marshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64024,
|
||||
Name: "gogoproto.unsafe_unmarshaler",
|
||||
Tag: "varint,64024,opt,name=unsafe_unmarshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64025,
|
||||
Name: "gogoproto.goproto_extensions_map",
|
||||
Tag: "varint,64025,opt,name=goproto_extensions_map",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnrecognized = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64026,
|
||||
Name: "gogoproto.goproto_unrecognized",
|
||||
Tag: "varint,64026,opt,name=goproto_unrecognized",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Protosizer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64028,
|
||||
Name: "gogoproto.protosizer",
|
||||
Tag: "varint,64028,opt,name=protosizer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Compare = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64029,
|
||||
Name: "gogoproto.compare",
|
||||
Tag: "varint,64029,opt,name=compare",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Typedecl = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64030,
|
||||
Name: "gogoproto.typedecl",
|
||||
Tag: "varint,64030,opt,name=typedecl",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Messagename = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64033,
|
||||
Name: "gogoproto.messagename",
|
||||
Tag: "varint,64033,opt,name=messagename",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoSizecache = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64034,
|
||||
Name: "gogoproto.goproto_sizecache",
|
||||
Tag: "varint,64034,opt,name=goproto_sizecache",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnkeyed = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64035,
|
||||
Name: "gogoproto.goproto_unkeyed",
|
||||
Tag: "varint,64035,opt,name=goproto_unkeyed",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Nullable = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65001,
|
||||
Name: "gogoproto.nullable",
|
||||
Tag: "varint,65001,opt,name=nullable",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Embed = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65002,
|
||||
Name: "gogoproto.embed",
|
||||
Tag: "varint,65002,opt,name=embed",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Customtype = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65003,
|
||||
Name: "gogoproto.customtype",
|
||||
Tag: "bytes,65003,opt,name=customtype",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Customname = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65004,
|
||||
Name: "gogoproto.customname",
|
||||
Tag: "bytes,65004,opt,name=customname",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Jsontag = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65005,
|
||||
Name: "gogoproto.jsontag",
|
||||
Tag: "bytes,65005,opt,name=jsontag",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Moretags = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65006,
|
||||
Name: "gogoproto.moretags",
|
||||
Tag: "bytes,65006,opt,name=moretags",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Casttype = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65007,
|
||||
Name: "gogoproto.casttype",
|
||||
Tag: "bytes,65007,opt,name=casttype",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Castkey = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65008,
|
||||
Name: "gogoproto.castkey",
|
||||
Tag: "bytes,65008,opt,name=castkey",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Castvalue = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65009,
|
||||
Name: "gogoproto.castvalue",
|
||||
Tag: "bytes,65009,opt,name=castvalue",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Stdtime = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65010,
|
||||
Name: "gogoproto.stdtime",
|
||||
Tag: "varint,65010,opt,name=stdtime",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Stdduration = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65011,
|
||||
Name: "gogoproto.stdduration",
|
||||
Tag: "varint,65011,opt,name=stdduration",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Wktpointer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65012,
|
||||
Name: "gogoproto.wktpointer",
|
||||
Tag: "varint,65012,opt,name=wktpointer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
func init() {
	proto.RegisterExtension(E_GoprotoEnumPrefix)
	proto.RegisterExtension(E_GoprotoEnumStringer)
	proto.RegisterExtension(E_EnumStringer)
	proto.RegisterExtension(E_EnumCustomname)
	proto.RegisterExtension(E_Enumdecl)
	proto.RegisterExtension(E_EnumvalueCustomname)
	proto.RegisterExtension(E_GoprotoGettersAll)
	proto.RegisterExtension(E_GoprotoEnumPrefixAll)
	proto.RegisterExtension(E_GoprotoStringerAll)
	proto.RegisterExtension(E_VerboseEqualAll)
	proto.RegisterExtension(E_FaceAll)
	proto.RegisterExtension(E_GostringAll)
	proto.RegisterExtension(E_PopulateAll)
	proto.RegisterExtension(E_StringerAll)
	proto.RegisterExtension(E_OnlyoneAll)
	proto.RegisterExtension(E_EqualAll)
	proto.RegisterExtension(E_DescriptionAll)
	proto.RegisterExtension(E_TestgenAll)
	proto.RegisterExtension(E_BenchgenAll)
	proto.RegisterExtension(E_MarshalerAll)
	proto.RegisterExtension(E_UnmarshalerAll)
	proto.RegisterExtension(E_StableMarshalerAll)
	proto.RegisterExtension(E_SizerAll)
	proto.RegisterExtension(E_GoprotoEnumStringerAll)
	proto.RegisterExtension(E_EnumStringerAll)
	proto.RegisterExtension(E_UnsafeMarshalerAll)
	proto.RegisterExtension(E_UnsafeUnmarshalerAll)
	proto.RegisterExtension(E_GoprotoExtensionsMapAll)
	proto.RegisterExtension(E_GoprotoUnrecognizedAll)
	proto.RegisterExtension(E_GogoprotoImport)
	proto.RegisterExtension(E_ProtosizerAll)
	proto.RegisterExtension(E_CompareAll)
	proto.RegisterExtension(E_TypedeclAll)
	proto.RegisterExtension(E_EnumdeclAll)
	proto.RegisterExtension(E_GoprotoRegistration)
	proto.RegisterExtension(E_MessagenameAll)
	proto.RegisterExtension(E_GoprotoSizecacheAll)
	proto.RegisterExtension(E_GoprotoUnkeyedAll)
	proto.RegisterExtension(E_GoprotoGetters)
	proto.RegisterExtension(E_GoprotoStringer)
	proto.RegisterExtension(E_VerboseEqual)
	proto.RegisterExtension(E_Face)
	proto.RegisterExtension(E_Gostring)
	proto.RegisterExtension(E_Populate)
	proto.RegisterExtension(E_Stringer)
	proto.RegisterExtension(E_Onlyone)
	proto.RegisterExtension(E_Equal)
	proto.RegisterExtension(E_Description)
	proto.RegisterExtension(E_Testgen)
	proto.RegisterExtension(E_Benchgen)
	proto.RegisterExtension(E_Marshaler)
	proto.RegisterExtension(E_Unmarshaler)
	proto.RegisterExtension(E_StableMarshaler)
	proto.RegisterExtension(E_Sizer)
	proto.RegisterExtension(E_UnsafeMarshaler)
	proto.RegisterExtension(E_UnsafeUnmarshaler)
	proto.RegisterExtension(E_GoprotoExtensionsMap)
	proto.RegisterExtension(E_GoprotoUnrecognized)
	proto.RegisterExtension(E_Protosizer)
	proto.RegisterExtension(E_Compare)
	proto.RegisterExtension(E_Typedecl)
	proto.RegisterExtension(E_Messagename)
	proto.RegisterExtension(E_GoprotoSizecache)
	proto.RegisterExtension(E_GoprotoUnkeyed)
	proto.RegisterExtension(E_Nullable)
	proto.RegisterExtension(E_Embed)
	proto.RegisterExtension(E_Customtype)
	proto.RegisterExtension(E_Customname)
	proto.RegisterExtension(E_Jsontag)
	proto.RegisterExtension(E_Moretags)
	proto.RegisterExtension(E_Casttype)
	proto.RegisterExtension(E_Castkey)
	proto.RegisterExtension(E_Castvalue)
	proto.RegisterExtension(E_Stdtime)
	proto.RegisterExtension(E_Stdduration)
	proto.RegisterExtension(E_Wktpointer)
}

func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) }

var fileDescriptor_592445b5231bc2b9 = []byte{
	// 1328 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
	0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
	0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18,
	0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84,
	0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f,
	0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7,
	0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6,
	0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9,
	0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6,
	0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59,
	0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc,
	0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99,
	0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19,
	0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b,
	0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79,
	0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8,
	0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d,
	0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4,
	0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78,
	0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0,
	0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1,
	0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6,
	0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae,
	0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c,
	0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0,
	0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b,
	0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04,
	0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28,
	0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36,
	0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50,
	0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d,
	0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa,
	0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5,
	0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b,
	0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24,
	0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05,
	0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2,
	0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b,
	0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92,
	0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56,
	0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e,
	0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19,
	0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70,
	0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0,
	0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c,
	0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a,
	0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0,
	0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4,
	0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95,
	0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9,
	0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9,
	0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f,
	0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9,
	0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5,
	0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8,
	0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb,
	0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae,
	0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31,
	0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d,
	0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30,
	0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94,
	0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f,
	0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36,
	0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e,
	0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b,
	0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e,
	0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb,
	0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5,
	0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17,
	0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45,
	0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32,
	0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4,
	0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8,
	0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f,
	0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49,
	0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f,
	0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb,
	0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c,
	0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90,
	0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e,
	0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd,
	0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb,
	0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00,
}
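
The extension descriptors above are what make the gogoproto options machine-readable at generation time. As a rough illustration (not part of the vendored diff), setting and reading one of these file-level options from Go could look like the sketch below; it assumes only the gogo/protobuf packages vendored here.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Equivalent to writing `option (gogoproto.marshaler_all) = true;`
	// in a .proto file: the option is stored as an extension on FileOptions.
	opts := &descriptor.FileOptions{}
	if err := proto.SetExtension(opts, gogoproto.E_MarshalerAll, proto.Bool(true)); err != nil {
		panic(err)
	}
	// GetBoolExtension falls back to the supplied default when the option is unset.
	fmt.Println(proto.GetBoolExtension(opts, gogoproto.E_MarshalerAll, false)) // true
}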
415 vendor/github.com/gogo/protobuf/gogoproto/helper.go generated vendored Normal file
@@ -0,0 +1,415 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package gogoproto

import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
import proto "github.com/gogo/protobuf/proto"

func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Embed, false)
}

func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Nullable, true)
}

func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Stdtime, false)
}

func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
}

func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
}

func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
}

func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
}

func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
}

func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
}

func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
}

func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
}

func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
}

func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
}

func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
	return (IsStdTime(field) || IsStdDuration(field) ||
		IsStdDouble(field) || IsStdFloat(field) ||
		IsStdInt64(field) || IsStdUInt64(field) ||
		IsStdInt32(field) || IsStdUInt32(field) ||
		IsStdBool(field) ||
		IsStdString(field) || IsStdBytes(field))
}

func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
}

func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
	nullable := IsNullable(field)
	if field.IsMessage() || IsCustomType(field) {
		return nullable
	}
	if proto3 {
		return false
	}
	return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
}

func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCustomType(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCastType(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCastKey(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
	typ := GetCastValue(field)
	if len(typ) > 0 {
		return true
	}
	return false
}

func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
}

func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
}

func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Customtype)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Casttype)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Castkey)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Castvalue)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
	name := GetCustomName(field)
	if len(name) > 0 {
		return true
	}
	return false
}

func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
	name := GetEnumCustomName(field)
	if len(name) > 0 {
		return true
	}
	return false
}

func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
	name := GetEnumValueCustomName(field)
	if len(name) > 0 {
		return true
	}
	return false
}

func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Customname)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_EnumCustomname)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
	if field == nil {
		return ""
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
		if err == nil && v.(*string) != nil {
			return *(v.(*string))
		}
	}
	return ""
}

func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
	if field == nil {
		return nil
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Jsontag)
		if err == nil && v.(*string) != nil {
			return (v.(*string))
		}
	}
	return nil
}

func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
	if field == nil {
		return nil
	}
	if field.Options != nil {
		v, err := proto.GetExtension(field.Options, E_Moretags)
		if err == nil && v.(*string) != nil {
			return (v.(*string))
		}
	}
	return nil
}

type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool

func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
}

func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
}

func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
}

func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
}

func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
}

func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
}

func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
}

func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
}

func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
}

func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
}

func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
}

func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
}

func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
}

func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
}

func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
}

func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
}

func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
}

func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
}

func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
}

func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
}

func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
}

func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
}

func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
}

func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
}

func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
	return file.GetSyntax() == "proto3"
}

func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
	return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
}

func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
}

func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
	return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
}

func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
}

func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
}

func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
}
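
Every helper above follows the same pattern: a message-level option overrides the file-level `*_all` option via proto.GetBoolExtension. A hypothetical plugin-side sketch of how such predicates get used (goFieldShape is illustrative, not part of gogoproto):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// goFieldShape decides how a field would be rendered in generated Go.
// This function is a made-up example; only the gogoproto calls are real.
func goFieldShape(field *descriptor.FieldDescriptorProto) string {
	switch {
	case gogoproto.IsCustomType(field):
		return "custom type " + gogoproto.GetCustomType(field)
	case gogoproto.IsNullable(field):
		return "pointer" // nullable defaults to true, so fields become pointers
	default:
		return "value" // (gogoproto.nullable) = false flattens to a value
	}
}

func main() {
	// With no options set, the nullable default (true) wins.
	fmt.Println(goFieldShape(&descriptor.FieldDescriptorProto{})) // pointer
}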
1416 vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go generated vendored Normal file
File diff suppressed because it is too large
118 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go generated vendored Normal file
@@ -0,0 +1,118 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package descriptor provides functions for obtaining protocol buffer
// descriptors for generated Go types.
//
// These functions cannot go in package proto because they depend on the
// generated protobuf descriptor messages, which themselves depend on proto.
package descriptor

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
)

// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
func extractFile(gz []byte) (*FileDescriptorProto, error) {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
	}
	defer r.Close()

	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
	}

	fd := new(FileDescriptorProto)
	if err := proto.Unmarshal(b, fd); err != nil {
		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
	}

	return fd, nil
}

// Message is a proto.Message with a method to return its descriptor.
//
// Message types generated by the protocol compiler always satisfy
// the Message interface.
type Message interface {
	proto.Message
	Descriptor() ([]byte, []int)
}

// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
// describing the given message.
func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
	gz, path := msg.Descriptor()
	fd, err := extractFile(gz)
	if err != nil {
		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
	}

	md = fd.MessageType[path[0]]
	for _, i := range path[1:] {
		md = md.NestedType[i]
	}
	return fd, md
}

// Is this field a scalar numeric type?
func (field *FieldDescriptorProto) IsScalar() bool {
	if field.Type == nil {
		return false
	}
	switch *field.Type {
	case FieldDescriptorProto_TYPE_DOUBLE,
		FieldDescriptorProto_TYPE_FLOAT,
		FieldDescriptorProto_TYPE_INT64,
		FieldDescriptorProto_TYPE_UINT64,
		FieldDescriptorProto_TYPE_INT32,
		FieldDescriptorProto_TYPE_FIXED64,
		FieldDescriptorProto_TYPE_FIXED32,
		FieldDescriptorProto_TYPE_BOOL,
		FieldDescriptorProto_TYPE_UINT32,
		FieldDescriptorProto_TYPE_ENUM,
		FieldDescriptorProto_TYPE_SFIXED32,
		FieldDescriptorProto_TYPE_SFIXED64,
		FieldDescriptorProto_TYPE_SINT32,
		FieldDescriptorProto_TYPE_SINT64:
		return true
	default:
		return false
	}
}
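
ForMessage above walks the nested-type path that Descriptor() returns alongside the gzipped file descriptor. FileDescriptorProto is itself a generated message, so it can serve as its own smoke test; a minimal sketch (the printed names are what the descriptor for descriptor.proto should report):

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Any compiler-generated message satisfies the Message interface above.
	fd, md := descriptor.ForMessage(&descriptor.FileDescriptorProto{})
	fmt.Println(fd.GetName()) // descriptor.proto
	fmt.Println(md.GetName()) // FileDescriptorProto
}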
2865 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go generated vendored Normal file
File diff suppressed because it is too large
752 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go generated vendored Normal file
@@ -0,0 +1,752 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: descriptor.proto

package descriptor

import (
	fmt "fmt"
	github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
	proto "github.com/gogo/protobuf/proto"
	math "math"
	reflect "reflect"
	sort "sort"
	strconv "strconv"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

func (this *FileDescriptorSet) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.FileDescriptorSet{")
	if this.File != nil {
		s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *FileDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 16)
	s = append(s, "&descriptor.FileDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Package != nil {
		s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
	}
	if this.Dependency != nil {
		s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
	}
	if this.PublicDependency != nil {
		s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
	}
	if this.WeakDependency != nil {
		s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
	}
	if this.MessageType != nil {
		s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
	}
	if this.EnumType != nil {
		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
	}
	if this.Service != nil {
		s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
	}
	if this.Extension != nil {
		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.SourceCodeInfo != nil {
		s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
	}
	if this.Syntax != nil {
		s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *DescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 14)
	s = append(s, "&descriptor.DescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Field != nil {
		s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
	}
	if this.Extension != nil {
		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
	}
	if this.NestedType != nil {
		s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
	}
	if this.EnumType != nil {
		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
	}
	if this.ExtensionRange != nil {
		s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
	}
	if this.OneofDecl != nil {
		s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.ReservedRange != nil {
		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
	}
	if this.ReservedName != nil {
		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *DescriptorProto_ExtensionRange) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
	if this.Start != nil {
		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
	}
	if this.End != nil {
		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
	}
	if this.Options != nil {
		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *DescriptorProto_ReservedRange) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
	if this.Start != nil {
		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
	}
	if this.End != nil {
		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
	}
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *ExtensionRangeOptions) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&descriptor.ExtensionRangeOptions{")
	if this.UninterpretedOption != nil {
		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
	}
	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func (this *FieldDescriptorProto) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 14)
	s = append(s, "&descriptor.FieldDescriptorProto{")
	if this.Name != nil {
		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
	}
	if this.Number != nil {
		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
	}
	if this.Label != nil {
		s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
	}
	if this.Type != nil {
		s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
	}
	if this.TypeName != nil {
		s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
	}
	if this.Extendee != nil {
		s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
	}
	if this.DefaultValue != nil {
		s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
	}
	if this.OneofIndex != nil {
		s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
	}
	if this.JsonName != nil {
		s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
	}
	if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *OneofDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.OneofDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.EnumDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Value != nil {
|
||||
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ReservedRange != nil {
|
||||
s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
|
||||
}
|
||||
if this.ReservedName != nil {
|
||||
s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumValueDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.EnumValueDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Number != nil {
|
||||
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ServiceDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.ServiceDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Method != nil {
|
||||
s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MethodDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 10)
|
||||
s = append(s, "&descriptor.MethodDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.InputType != nil {
|
||||
s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
|
||||
}
|
||||
if this.OutputType != nil {
|
||||
s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ClientStreaming != nil {
|
||||
s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
|
||||
}
|
||||
if this.ServerStreaming != nil {
|
||||
s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FileOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 25)
|
||||
s = append(s, "&descriptor.FileOptions{")
|
||||
if this.JavaPackage != nil {
|
||||
s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
|
||||
}
|
||||
if this.JavaOuterClassname != nil {
|
||||
s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
|
||||
}
|
||||
if this.JavaMultipleFiles != nil {
|
||||
s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
|
||||
}
|
||||
if this.JavaGenerateEqualsAndHash != nil {
|
||||
s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
|
||||
}
|
||||
if this.JavaStringCheckUtf8 != nil {
|
||||
s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
|
||||
}
|
||||
if this.OptimizeFor != nil {
|
||||
s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
|
||||
}
|
||||
if this.GoPackage != nil {
|
||||
s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
|
||||
}
|
||||
if this.CcGenericServices != nil {
|
||||
s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.JavaGenericServices != nil {
|
||||
s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.PyGenericServices != nil {
|
||||
s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.PhpGenericServices != nil {
|
||||
s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.CcEnableArenas != nil {
|
||||
s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
|
||||
}
|
||||
if this.ObjcClassPrefix != nil {
|
||||
s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
|
||||
}
|
||||
if this.CsharpNamespace != nil {
|
||||
s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
|
||||
}
|
||||
if this.SwiftPrefix != nil {
|
||||
s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
|
||||
}
|
||||
if this.PhpClassPrefix != nil {
|
||||
s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
|
||||
}
|
||||
if this.PhpNamespace != nil {
|
||||
s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
|
||||
}
|
||||
if this.PhpMetadataNamespace != nil {
|
||||
s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
|
||||
}
|
||||
if this.RubyPackage != nil {
|
||||
s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MessageOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.MessageOptions{")
|
||||
if this.MessageSetWireFormat != nil {
|
||||
s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
|
||||
}
|
||||
if this.NoStandardDescriptorAccessor != nil {
|
||||
s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.MapEntry != nil {
|
||||
s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FieldOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 11)
|
||||
s = append(s, "&descriptor.FieldOptions{")
|
||||
if this.Ctype != nil {
|
||||
s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
|
||||
}
|
||||
if this.Packed != nil {
|
||||
s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
|
||||
}
|
||||
if this.Jstype != nil {
|
||||
s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
|
||||
}
|
||||
if this.Lazy != nil {
|
||||
s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.Weak != nil {
|
||||
s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *OneofOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.OneofOptions{")
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.EnumOptions{")
|
||||
if this.AllowAlias != nil {
|
||||
s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumValueOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.EnumValueOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ServiceOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.ServiceOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MethodOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.MethodOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.IdempotencyLevel != nil {
|
||||
s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *UninterpretedOption) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 11)
|
||||
s = append(s, "&descriptor.UninterpretedOption{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
|
||||
}
|
||||
if this.IdentifierValue != nil {
|
||||
s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
|
||||
}
|
||||
if this.PositiveIntValue != nil {
|
||||
s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
|
||||
}
|
||||
if this.NegativeIntValue != nil {
|
||||
s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
|
||||
}
|
||||
if this.DoubleValue != nil {
|
||||
s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
|
||||
}
|
||||
if this.StringValue != nil {
|
||||
s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
|
||||
}
|
||||
if this.AggregateValue != nil {
|
||||
s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *UninterpretedOption_NamePart) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.UninterpretedOption_NamePart{")
|
||||
if this.NamePart != nil {
|
||||
s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
|
||||
}
|
||||
if this.IsExtension != nil {
|
||||
s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *SourceCodeInfo) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.SourceCodeInfo{")
|
||||
if this.Location != nil {
|
||||
s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *SourceCodeInfo_Location) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.SourceCodeInfo_Location{")
|
||||
if this.Path != nil {
|
||||
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
|
||||
}
|
||||
if this.Span != nil {
|
||||
s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
|
||||
}
|
||||
if this.LeadingComments != nil {
|
||||
s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
|
||||
}
|
||||
if this.TrailingComments != nil {
|
||||
s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
|
||||
}
|
||||
if this.LeadingDetachedComments != nil {
|
||||
s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *GeneratedCodeInfo) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.GeneratedCodeInfo{")
|
||||
if this.Annotation != nil {
|
||||
s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *GeneratedCodeInfo_Annotation) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 8)
|
||||
s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
|
||||
if this.Path != nil {
|
||||
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
|
||||
}
|
||||
if this.SourceFile != nil {
|
||||
s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
|
||||
}
|
||||
if this.Begin != nil {
|
||||
s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringDescriptor(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
||||
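These generated GoString methods render a populated descriptor as Go source text, using valueToGoStringDescriptor to wrap each optional (pointer) field in a small literal-building closure. A minimal usage sketch, not part of the vendored file (the field values here are made up for illustration; proto.String is gogo/protobuf's pointer helper):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
	}
	// Prints Go source that would reconstruct fd, roughly:
	// &descriptor.FileDescriptorProto{Name: func(v string) *string { return &v } ( "example.proto" ), ...}
	fmt.Println(fd.GoString())
}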
390	vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go (generated, vendored, new file)
@@ -0,0 +1,390 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package descriptor

import (
	"strings"
)

func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
	if !msg.GetOptions().GetMapEntry() {
		return nil, nil
	}
	return msg.GetField()[0], msg.GetField()[1]
}

func dotToUnderscore(r rune) rune {
	if r == '.' {
		return '_'
	}
	return r
}

func (field *FieldDescriptorProto) WireType() (wire int) {
	switch *field.Type {
	case FieldDescriptorProto_TYPE_DOUBLE:
		return 1
	case FieldDescriptorProto_TYPE_FLOAT:
		return 5
	case FieldDescriptorProto_TYPE_INT64:
		return 0
	case FieldDescriptorProto_TYPE_UINT64:
		return 0
	case FieldDescriptorProto_TYPE_INT32:
		return 0
	case FieldDescriptorProto_TYPE_UINT32:
		return 0
	case FieldDescriptorProto_TYPE_FIXED64:
		return 1
	case FieldDescriptorProto_TYPE_FIXED32:
		return 5
	case FieldDescriptorProto_TYPE_BOOL:
		return 0
	case FieldDescriptorProto_TYPE_STRING:
		return 2
	case FieldDescriptorProto_TYPE_GROUP:
		return 2
	case FieldDescriptorProto_TYPE_MESSAGE:
		return 2
	case FieldDescriptorProto_TYPE_BYTES:
		return 2
	case FieldDescriptorProto_TYPE_ENUM:
		return 0
	case FieldDescriptorProto_TYPE_SFIXED32:
		return 5
	case FieldDescriptorProto_TYPE_SFIXED64:
		return 1
	case FieldDescriptorProto_TYPE_SINT32:
		return 0
	case FieldDescriptorProto_TYPE_SINT64:
		return 0
	}
	panic("unreachable")
}

func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
	packed := field.IsPacked()
	wireType := field.WireType()
	fieldNumber := field.GetNumber()
	if packed {
		wireType = 2
	}
	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	return x
}

func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
	packed := field.IsPacked3()
	wireType := field.WireType()
	fieldNumber := field.GetNumber()
	if packed {
		wireType = 2
	}
	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	return x
}

func (field *FieldDescriptorProto) GetKey() []byte {
	x := field.GetKeyUint64()
	i := 0
	keybuf := make([]byte, 0)
	for i = 0; x > 127; i++ {
		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	keybuf = append(keybuf, uint8(x))
	return keybuf
}

func (field *FieldDescriptorProto) GetKey3() []byte {
	x := field.GetKey3Uint64()
	i := 0
	keybuf := make([]byte, 0)
	for i = 0; x > 127; i++ {
		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	keybuf = append(keybuf, uint8(x))
	return keybuf
}

func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
	msg := desc.GetMessage(packageName, messageName)
	if msg == nil {
		return nil
	}
	for _, field := range msg.GetField() {
		if field.GetName() == fieldName {
			return field
		}
	}
	return nil
}

func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
	for _, msg := range file.GetMessageType() {
		if msg.GetName() == typeName {
			return msg
		}
		nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
		if nes != nil {
			return nes
		}
	}
	return nil
}

func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
	for _, nes := range msg.GetNestedType() {
		if nes.GetName() == typeName {
			return nes
		}
		res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
		if res != nil {
			return res
		}
	}
	return nil
}

func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
	for _, file := range desc.GetFile() {
		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
			continue
		}
		for _, msg := range file.GetMessageType() {
			if msg.GetName() == typeName {
				return msg
			}
		}
		for _, msg := range file.GetMessageType() {
			for _, nes := range msg.GetNestedType() {
				if nes.GetName() == typeName {
					return nes
				}
				if msg.GetName()+"."+nes.GetName() == typeName {
					return nes
				}
			}
		}
	}
	return nil
}

func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
	for _, file := range desc.GetFile() {
		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
			continue
		}
		for _, msg := range file.GetMessageType() {
			if msg.GetName() == typeName {
				return file.GetSyntax() == "proto3"
			}
		}
		for _, msg := range file.GetMessageType() {
			for _, nes := range msg.GetNestedType() {
				if nes.GetName() == typeName {
					return file.GetSyntax() == "proto3"
				}
				if msg.GetName()+"."+nes.GetName() == typeName {
					return file.GetSyntax() == "proto3"
				}
			}
		}
	}
	return false
}

func (msg *DescriptorProto) IsExtendable() bool {
	return len(msg.GetExtensionRange()) > 0
}

func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
	parent := desc.GetMessage(packageName, typeName)
	if parent == nil {
		return "", nil
	}
	if !parent.IsExtendable() {
		return "", nil
	}
	extendee := "." + packageName + "." + typeName
	for _, file := range desc.GetFile() {
		for _, ext := range file.GetExtension() {
			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
					continue
				}
			} else {
				if ext.GetExtendee() != extendee {
					continue
				}
			}
			if ext.GetName() == fieldName {
				return file.GetPackage(), ext
			}
		}
	}
	return "", nil
}

func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
	parent := desc.GetMessage(packageName, typeName)
	if parent == nil {
		return "", nil
	}
	if !parent.IsExtendable() {
		return "", nil
	}
	extendee := "." + packageName + "." + typeName
	for _, file := range desc.GetFile() {
		for _, ext := range file.GetExtension() {
			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
					continue
				}
			} else {
				if ext.GetExtendee() != extendee {
					continue
				}
			}
			if ext.GetNumber() == fieldNum {
				return file.GetPackage(), ext
			}
		}
	}
	return "", nil
}

func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
	parent := desc.GetMessage(packageName, typeName)
	if parent == nil {
		return "", ""
	}
	field := parent.GetFieldDescriptor(fieldName)
	if field == nil {
		var extPackageName string
		extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
		if field == nil {
			return "", ""
		}
		packageName = extPackageName
	}
	typeNames := strings.Split(field.GetTypeName(), ".")
	if len(typeNames) == 1 {
		msg := desc.GetMessage(packageName, typeName)
		if msg == nil {
			return "", ""
		}
		return packageName, msg.GetName()
	}
	if len(typeNames) > 2 {
		for i := 1; i < len(typeNames)-1; i++ {
			packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
			typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
			msg := desc.GetMessage(packageName, typeName)
			if msg != nil {
				typeNames := strings.Split(msg.GetName(), ".")
				if len(typeNames) == 1 {
					return packageName, msg.GetName()
				}
				return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
			}
		}
	}
	return "", ""
}

func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
	for _, field := range msg.GetField() {
		if field.GetName() == fieldName {
			return field
		}
	}
	return nil
}

func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
	for _, file := range desc.GetFile() {
		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
			continue
		}
		for _, enum := range file.GetEnumType() {
			if enum.GetName() == typeName {
				return enum
			}
		}
	}
	return nil
}

func (f *FieldDescriptorProto) IsEnum() bool {
	return *f.Type == FieldDescriptorProto_TYPE_ENUM
}

func (f *FieldDescriptorProto) IsMessage() bool {
	return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
}

func (f *FieldDescriptorProto) IsBytes() bool {
	return *f.Type == FieldDescriptorProto_TYPE_BYTES
}

func (f *FieldDescriptorProto) IsRepeated() bool {
	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
}

func (f *FieldDescriptorProto) IsString() bool {
	return *f.Type == FieldDescriptorProto_TYPE_STRING
}

func (f *FieldDescriptorProto) IsBool() bool {
	return *f.Type == FieldDescriptorProto_TYPE_BOOL
}

func (f *FieldDescriptorProto) IsRequired() bool {
	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
}

func (f *FieldDescriptorProto) IsPacked() bool {
	return f.Options != nil && f.GetOptions().GetPacked()
}

func (f *FieldDescriptorProto) IsPacked3() bool {
	if f.IsRepeated() && f.IsScalar() {
		if f.Options == nil || f.GetOptions().Packed == nil {
			return true
		}
		return f.Options != nil && f.GetOptions().GetPacked()
	}
	return false
}

func (m *DescriptorProto) HasExtension() bool {
	return len(m.ExtensionRange) > 0
}
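As a quick illustration of the key encoding in helper.go above: GetKeyUint64 packs a field's number and wire type into a single value (fieldNumber<<3 | wireType), and GetKey emits that value as a protobuf varint. A self-contained sketch of the same arithmetic, separate from the vendored descriptor types (the function name varintKey and the sample values are chosen purely for illustration):

package main

import "fmt"

// varintKey reproduces the arithmetic used by GetKeyUint64/GetKey:
// a field's tag bytes on the wire are varint(fieldNumber<<3 | wireType).
func varintKey(fieldNumber int32, wireType int) []byte {
	x := uint64(uint32(fieldNumber)<<3 | uint32(wireType))
	buf := make([]byte, 0, 5)
	for x > 127 {
		// Emit seven payload bits with the continuation bit set.
		buf = append(buf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	buf = append(buf, uint8(x))
	return buf
}

func main() {
	// Field 1, wire type 2 (length-delimited): single key byte 0x0a.
	fmt.Printf("% x\n", varintKey(1, 2)) // 0a
	// Field 16, wire type 0 (varint): 16<<3 = 128, so the key needs two bytes.
	fmt.Printf("% x\n", varintKey(16, 0)) // 80 01
}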
140	vendor/github.com/gogo/protobuf/types/any.go (generated, vendored, new file)
@@ -0,0 +1,140 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/gogo/protobuf/proto"
)

const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
//   var x ptypes.DynamicAny
//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//   fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func EmptyAny(any *Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = EmptyAny(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *Any, pb proto.Message) bool {
	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
	// but it avoids scanning TypeUrl for the slash.
	if any == nil {
		return false
	}
	name := proto.MessageName(pb)
	prefix := len(any.TypeUrl) - len(name)
	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
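A hedged usage sketch of the Any helpers vendored above, not part of the file itself: it wraps a message into a google.protobuf.Any, checks its type, and unpacks it both into a concrete target and via DynamicAny (types.Duration is used only because it is a message type linked in by this same package; any registered proto.Message would do).

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
)

func main() {
	// Wrap a message into a google.protobuf.Any.
	src := &types.Duration{Seconds: 5}
	any, err := types.MarshalAny(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Is reports whether the Any holds a given message type.
	fmt.Println(types.Is(any, &types.Duration{})) // true

	// Unpack into a concrete target...
	var dst types.Duration
	if err := types.UnmarshalAny(any, &dst); err != nil {
		panic(err)
	}

	// ...or into a DynamicAny, which allocates the right type itself.
	var dyn types.DynamicAny
	if err := types.UnmarshalAny(any, &dyn); err != nil {
		panic(err)
	}
	fmt.Println(proto.MessageName(dyn.Message)) // google.protobuf.Duration
}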
Some files were not shown because too many files have changed in this diff.