Revert "upgrade sdk to support Kafka (#436)" (#446)

Signed-off-by: Wei Liu <liuweixa@redhat.com>
Author: Wei Liu
Date: 2024-04-26 15:16:17 +08:00
Committed by: GitHub
parent 799ee4f078
commit 147f40c363
83 changed files with 1487 additions and 31830 deletions

go.mod

@@ -3,8 +3,7 @@ module open-cluster-management.io/ocm
go 1.21
require (
github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf
github.com/confluentinc/confluent-kafka-go/v2 v2.3.0
github.com/cloudevents/sdk-go/v2 v2.15.2
github.com/davecgh/go-spew v1.1.1
github.com/evanphx/json-patch v5.9.0+incompatible
github.com/ghodss/yaml v1.0.0
@@ -37,7 +36,7 @@ require (
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556
open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad
open-cluster-management.io/sdk-go v0.13.1-0.20240422015316-b61db1fa9e71
open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090
sigs.k8s.io/controller-runtime v0.17.3
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96
)
@@ -55,7 +54,6 @@ require (
github.com/bwmarrin/snowflake v0.3.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 // indirect
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -93,8 +91,6 @@ require (
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect

go.sum

@@ -3,8 +3,6 @@ cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiV
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
@@ -14,10 +12,6 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
@@ -37,20 +31,12 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 h1:3/pjormyqkSjF2GHQehTELZ9oqlER4GrJZiVUIk8Fy8=
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991/go.mod h1:xiar5+gk13WqyAUQ/cpcxcjD1IhLe/PeilSfCdPcfMU=
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 h1:pXyRKZ0T5WoB6X9QnHS5cEyW0Got39bNQIECxGUKVO4=
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995/go.mod h1:mz9oS2Yhh/S7cvrrsgGMMR+6Shy0ZyL2lDN1sHQO1wE=
github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf h1:91HOb+vxZZQ1rJTJtvhJPRl2qyQa5bqh7lrIYhQSDnQ=
github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc=
github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 h1:icCHutJouWlQREayFwCc7lxDAhws08td+W3/gdqgZts=
github.com/confluentinc/confluent-kafka-go/v2 v2.3.0/go.mod h1:/VTy8iEpe6mD9pkCH5BhijlUl8ulUXymKv1Qig5Rgb8=
github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw=
github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -62,14 +48,6 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eclipse/paho.golang v0.11.0 h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM=
@@ -170,8 +148,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -179,8 +155,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
@@ -191,12 +165,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/mochi-mqtt/server/v2 v2.4.6 h1:3iaQLG4hD/2vSh0Rwu4+h//KUcWR2zAKQIxhJuoJmCg=
github.com/mochi-mqtt/server/v2 v2.4.6/go.mod h1:M1lZnLbyowXUyQBIlHYlX1wasxXqv/qFWwQxAzfphwA=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -204,8 +172,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@@ -216,12 +182,6 @@ github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/openshift/api v0.0.0-20231218131639-7a5aa77cc72d h1:aVjDasSo08KUIltX++Mcl6ptN0ooxh3dRttHBFGVVI0=
github.com/openshift/api v0.0.0-20231218131639-7a5aa77cc72d/go.mod h1:RLaNkRn87bQeH3MpTWXCxlSb62qVGBxfQY344jBfVsg=
github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8 h1:cu3YUMVGsKIyFyJGO3F6BZKGYQZpCKxAv9cBPgQAca8=
@@ -280,8 +240,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/testcontainers/testcontainers-go v0.14.0 h1:h0D5GaYG9mhOWr2qHdEKDXpkce/VlvaYOCzTRi6UBi8=
github.com/testcontainers/testcontainers-go v0.14.0/go.mod h1:hSRGJ1G8Q5Bw2gXgPulJOLlEBaYJHeBSOkQM5JLG+JQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@@ -469,8 +427,8 @@ open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556
open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556/go.mod h1:HayKCznnlyW+0dUJQGj5sNR6i3tvylSySD3YnvZkBtY=
open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad h1:DB3GpK5vzbGu9ss13bfodi8pGTkPcpdcLvOPEPMptTk=
open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad/go.mod h1:yrNuMMpciXjXPnj2yznb6LTyrGliiTrFZAJDp/Ck3c4=
open-cluster-management.io/sdk-go v0.13.1-0.20240422015316-b61db1fa9e71 h1:JAD43qabo6XKEWO4zbC4rlnbzgncMkF4olAz03EdaNc=
open-cluster-management.io/sdk-go v0.13.1-0.20240422015316-b61db1fa9e71/go.mod h1:XBrldz+AqVBy9miOVNIu+6l8JXS18i795XbTqIqURJU=
open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 h1:zFmHuW+ztdfUUNslqNW+H1WEcfdEUQHoRDbmdajX340=
open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk=


@@ -44,7 +44,7 @@ func (o *WorkloadAgentOptions) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period",
o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction")
fs.StringVar(&o.WorkloadSourceDriver, "workload-source-driver",
o.WorkloadSourceDriver, "The type of workload source driver, currently it can be kube, mqtt, grpc or kafka")
o.WorkloadSourceDriver, "The type of workload source driver, currently it can be kube, mqtt or grpc")
fs.StringVar(&o.WorkloadSourceConfig, "workload-source-config",
o.WorkloadSourceConfig, "The config file path of current workload source")
fs.StringVar(&o.CloudEventsClientID, "cloudevents-client-id",


@@ -51,11 +51,9 @@ test-addon-integration: ensure-kubebuilder-tools
./addon-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast
.PHONY: test-addon-integration
# TODO we focus on the MQTT test for now, because the confluent Kafka mock API is experimental and it makes tests flaky.
# We can enable the Kafka tests after we find a stable way to mock Kafka.
test-cloudevents-integration: ensure-kubebuilder-tools
go test -c ./test/integration/cloudevents -o ./cloudevents-integration.test
./cloudevents-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast --ginkgo.skip-file=^*_kafka_test.go
./cloudevents-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast
.PHONY: test-cloudevents-integration
test-integration: test-registration-operator-integration test-registration-integration test-placement-integration test-work-integration test-addon-integration


@@ -1,7 +0,0 @@
package cloudevents
import (
"github.com/onsi/ginkgo/v2"
)
var _ = ginkgo.Describe("ManifestWork Delete Option (Kafka)", runDeleteOptionTest(kafkaSourceInfo, kClusterNameGenerator.generate))


@@ -1,5 +0,0 @@
package cloudevents
import "github.com/onsi/ginkgo/v2"
var _ = ginkgo.Describe("ManifestWork Delete Option (MQTT)", runDeleteOptionTest(mqttSourceInfo, clusterName))


@@ -10,8 +10,8 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilrand "k8s.io/apimachinery/pkg/util/rand"
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
@@ -19,437 +19,425 @@ import (
"open-cluster-management.io/ocm/test/integration/util"
)
func runDeleteOptionTest(sourceInfoGetter sourceInfoGetter, clusterNameGetter clusterNameGetter) func() {
return func() {
var err error
var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
var err error
var cancel context.CancelFunc
var cancel context.CancelFunc
var clusterName string
var sourceDriver string
var sourceConfigPath string
var sourceHash string
var sourceClient workclientset.Interface
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
var anotherWork *workapiv1.ManifestWork
var appliedManifestWorkName string
var anotherAppliedManifestWorkName string
var clusterName string
ginkgo.BeforeEach(func() {
clusterName = utilrand.String(5)
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
var anotherWork *workapiv1.ManifestWork
var appliedManifestWorkName string
var anotherAppliedManifestWorkName string
ns := &corev1.Namespace{}
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
o.WorkloadSourceDriver = workSourceDriver
o.WorkloadSourceConfig = workSourceConfigFileName
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"}
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
go runWorkAgent(ctx, o, commOptions)
// reset manifests
manifests = nil
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Delete options", func() {
ginkgo.BeforeEach(func() {
sourceClient, sourceDriver, sourceConfigPath, sourceHash = sourceInfoGetter()
gomega.Expect(sourceClient).ToNot(gomega.BeNil())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
clusterName = clusterNameGetter()
ns := &corev1.Namespace{}
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigPath
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"}
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
go runWorkAgent(ctx, o, commOptions)
// reset manifests
manifests = nil
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}
work = util.NewManifestWork(clusterName, "", manifests)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
ginkgo.It("Orphan deletion of the whole manifestwork", func() {
work.Spec.DeleteOption = &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Ensure configmap exists
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
if len(cm.OwnerReferences) != 0 {
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete the work
err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Ensure configmap exists
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
ginkgo.Context("Delete options", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}
work = util.NewManifestWork(clusterName, "", manifests)
})
ginkgo.It("Orphan deletion of the whole manifestwork", func() {
work.Spec.DeleteOption = &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Ensure configmap exists
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
if len(cm.OwnerReferences) != 0 {
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete the work
err = sourceClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Ensure configmap exists
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("Clean the resource when orphan deletion option is removed", func() {
work.Spec.DeleteOption = &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan,
SelectivelyOrphan: &workapiv1.SelectivelyOrphan{
OrphaningRules: []workapiv1.OrphaningRule{
{
Group: "",
Resource: "configmaps",
Namespace: clusterName,
Name: cm1,
},
ginkgo.It("Clean the resource when orphan deletion option is removed", func() {
work.Spec.DeleteOption = &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan,
SelectivelyOrphan: &workapiv1.SelectivelyOrphan{
OrphaningRules: []workapiv1.OrphaningRule{
{
Group: "",
Resource: "configmaps",
Namespace: clusterName,
Name: cm1,
},
},
},
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Ensure configmap exists
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
if len(cm.OwnerReferences) != 0 {
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
}
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Ensure configmap exists
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
if len(cm.OwnerReferences) != 0 {
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Remove the delete option
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.DeleteOption = nil
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
// Remove the delete option
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.DeleteOption = nil
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
if len(cm.OwnerReferences) != 1 {
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
}
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
if len(cm.OwnerReferences) != 1 {
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
}
// Delete the work
err = sourceClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Delete the work
err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// All of the resource should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// All of the resource should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
})
ginkgo.Context("Resource sharing and adoption between manifestworks", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
}
work = util.NewManifestWork(clusterName, "", manifests)
// Create another manifestworks with one shared resource.
anotherWork = util.NewManifestWork(clusterName, "sharing-resource-work", []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, []string{})),
})
})
ginkgo.Context("Resource sharing and adoption between manifestworks", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
ginkgo.JustBeforeEach(func() {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
anotherWork, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, anotherWork.UID)
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
work = util.NewManifestWork(clusterName, "", manifests)
// Create another manifestworks with one shared resource.
anotherWork = util.NewManifestWork(clusterName, "sharing-resource-work", []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, []string{})),
})
})
ginkgo.JustBeforeEach(func() {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
appliedManifestWorkName = toAppliedManifestWorkName(sourceHash, work)
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
anotherWork, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
anotherAppliedManifestWorkName = toAppliedManifestWorkName(sourceHash, anotherWork)
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, sourceClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete one manifestwork
err = sourceClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource
gomega.Eventually(func() error {
appliedWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if errors.IsNotFound(err) {
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("appliedmanifestwork should not exist: %v", appliedWork.DeletionTimestamp)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
if currentUID != configMap.UID {
return fmt.Errorf("UID should be equal")
}
anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
hasAppliedResourceName := false
hasAppliedResourceUID := false
for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources {
if appliedResource.Name == cm1 {
hasAppliedResourceName = true
}
if appliedResource.UID != string(currentUID) {
hasAppliedResourceUID = true
}
}
if !hasAppliedResourceName {
return fmt.Errorf("resource Name should be cm1")
}
if !hasAppliedResourceUID {
return fmt.Errorf("UID should be equal")
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
currentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := currentConfigMap.UID
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Update one manifestwork to remove the shared resource
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = []workapiv1.Manifest{
manifests[1],
util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"g": "h"}, []string{})),
}
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Ensure the resource is not tracked by the appliedmanifestwork.
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 {
return fmt.Errorf("found applied resource name cm1")
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete one manifestwork
err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource
gomega.Eventually(func() error {
appliedWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}
if err != nil {
return err
}
return fmt.Errorf("appliedmanifestwork should not exist: %v", appliedWork.DeletionTimestamp)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(
context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
if currentUID != configMap.UID {
return fmt.Errorf("UID should be equal")
}
anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
hasAppliedResourceName := false
hasAppliedResourceUID := false
for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources {
if appliedResource.Name == cm1 {
hasAppliedResourceName = true
}
if currentUID != configMap.UID {
return fmt.Errorf("UID should be equal")
if appliedResource.UID != string(currentUID) {
hasAppliedResourceUID = true
}
}
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
if !hasAppliedResourceName {
return fmt.Errorf("resource Name should be cm1")
}
hasAppliedResourceName := false
hasAppliedResourceUID := false
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 {
hasAppliedResourceName = true
}
if !hasAppliedResourceUID {
return fmt.Errorf("UID should be equal")
}
if appliedResource.UID != string(currentUID) {
hasAppliedResourceUID = true
}
}
if !hasAppliedResourceName {
return fmt.Errorf("resource Name should be cm1")
}
if !hasAppliedResourceUID {
return fmt.Errorf("UID should be equal")
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
}
}
ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) {
return nil
}
}
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Update one manifestwork to remove the shared resource
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = []workapiv1.Manifest{
manifests[1],
util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"g": "h"}, []string{})),
}
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Ensure the resource is not tracked by the appliedmanifestwork.
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 {
return fmt.Errorf("found applied resource name cm1")
}
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(
context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
if currentUID != configMap.UID {
return fmt.Errorf("UID should be equal")
}
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
hasAppliedResourceName := false
hasAppliedResourceUID := false
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 {
hasAppliedResourceName = true
}
if appliedResource.UID != string(currentUID) {
hasAppliedResourceUID = true
}
}
if !hasAppliedResourceName {
return fmt.Errorf("resource Name should be cm1")
}
if !hasAppliedResourceUID {
return fmt.Errorf("UID should be equal")
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
})
})


@@ -166,7 +166,7 @@ func startAgent(ctx context.Context, clusterName string) {
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
o.WorkloadSourceDriver = mqttDriver
o.WorkloadSourceDriver = workSourceDriver
o.WorkloadSourceConfig = mwrsConfigFileName
o.CloudEventsClientID = fmt.Sprintf("%s-work-client", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
@@ -179,7 +179,7 @@ func startAgent(ctx context.Context, clusterName string) {
func startCtrl(ctx context.Context) {
opts := hub.NewWorkHubManagerOptions()
opts.WorkDriver = mqttDriver
opts.WorkDriver = workSourceDriver
opts.WorkDriverConfig = mwrsConfigFileName
opts.CloudEventsClientID = "mwrsctrl-client"
hubConfig := hub.NewWorkHubManagerConfig(opts)


@@ -7,7 +7,6 @@ import (
"os"
"time"
confluentkafka "github.com/confluentinc/confluent-kafka-go/v2/kafka"
"github.com/ghodss/yaml"
mochimqtt "github.com/mochi-mqtt/server/v2"
"github.com/mochi-mqtt/server/v2/hooks/auth"
@@ -17,75 +16,66 @@ import (
workinformers "open-cluster-management.io/api/client/work/informers/externalversions"
workv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
sdkoptions "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
"open-cluster-management.io/sdk-go/pkg/cloudevents/work"
"open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher"
"open-cluster-management.io/ocm/pkg/work/helper"
)
const (
sourceID = "cloudevents-mqtt-integration-test"
mqttBrokerHost = "127.0.0.1:1883"
)
var mqttBroker *mochimqtt.Server
type Source interface {
Host() string
Hash() string
Start(ctx context.Context) error
Stop() error
Workclientset() workclientset.Interface
}
type MQTTSource struct {
mqttBroker *mochimqtt.Server
workClientSet workclientset.Interface
sourceID string
mqttBrokerHost string
brokerHash string
configFile string
configFile string
workClientSet workclientset.Interface
}
func NewMQTTSource(configFile string) *MQTTSource {
return &MQTTSource{
sourceID: "cloudevents-mqtt-integration-test",
mqttBrokerHost: "127.0.0.1:1883",
brokerHash: helper.HubHash("127.0.0.1:1883"),
configFile: configFile,
configFile: configFile,
}
}
func (m *MQTTSource) Host() string {
return m.mqttBrokerHost
}
func (m *MQTTSource) Hash() string {
return m.brokerHash
return mqttBrokerHost
}
func (m *MQTTSource) Start(ctx context.Context) error {
// start a MQTT broker
m.mqttBroker = mochimqtt.New(nil)
mqttBroker = mochimqtt.New(nil)
// allow all connections
if err := m.mqttBroker.AddHook(new(auth.AllowHook), nil); err != nil {
if err := mqttBroker.AddHook(new(auth.AllowHook), nil); err != nil {
return err
}
if err := m.mqttBroker.AddListener(listeners.NewTCP("mqtt-test-broker", m.mqttBrokerHost, nil)); err != nil {
if err := mqttBroker.AddListener(listeners.NewTCP("mqtt-test-broker", mqttBrokerHost, nil)); err != nil {
return err
}
go func() {
if err := m.mqttBroker.Serve(); err != nil {
if err := mqttBroker.Serve(); err != nil {
log.Fatal(err)
}
}()
// write the mqtt broker config to a file
config := mqtt.MQTTConfig{
BrokerHost: m.mqttBrokerHost,
BrokerHost: mqttBrokerHost,
Topics: &types.Topics{
SourceEvents: fmt.Sprintf("sources/%s/clusters/+/sourceevents", m.sourceID),
AgentEvents: fmt.Sprintf("sources/%s/clusters/+/agentevents", m.sourceID),
SourceEvents: fmt.Sprintf("sources/%s/clusters/+/sourceevents", sourceID),
AgentEvents: fmt.Sprintf("sources/%s/clusters/+/agentevents", sourceID),
},
}
@@ -97,128 +87,23 @@ func (m *MQTTSource) Start(ctx context.Context) error {
return err
}
// build a source client
workLister := &manifestWorkLister{}
watcher := watcher.NewManifestWorkWatcher()
mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(m.configFile)
if err != nil {
return err
}
sourceOptions := mqtt.NewSourceOptions(mqttOptions, fmt.Sprintf("%s-client", m.sourceID), m.sourceID)
workClientSet, err := starSourceClient(ctx, sourceOptions)
if err != nil {
return err
}
m.workClientSet = workClientSet
return nil
}
func (m *MQTTSource) Stop() error {
return m.mqttBroker.Close()
}
func (m *MQTTSource) Workclientset() workclientset.Interface {
return m.workClientSet
}
type KafkaSource struct {
kafkaCluster *confluentkafka.MockCluster
workClientSet workclientset.Interface
sourceID string
bootstrapServer string
serverHash string
configFile string
}
func NewKafkaSource(configFile string) *KafkaSource {
return &KafkaSource{
sourceID: "cloudevents-kafka-integration-test",
configFile: configFile,
}
}
func (k *KafkaSource) Host() string {
return k.bootstrapServer
}
func (k *KafkaSource) Hash() string {
return k.serverHash
}
func (k *KafkaSource) Start(ctx context.Context) error {
kafkaCluster, err := confluentkafka.NewMockCluster(1)
if err != nil {
return err
}
k.kafkaCluster = kafkaCluster
k.bootstrapServer = kafkaCluster.BootstrapServers()
k.serverHash = helper.HubHash(k.bootstrapServer)
// Note: to use the mock kafka cluster, the topics must be created first
// If new test cases are added, the topics need to be increased accordingly
if err := k.kafkaCluster.CreateTopic(fmt.Sprintf("sourcebroadcast.%s", k.sourceID), 1, 1); err != nil {
return err
}
if err := k.kafkaCluster.CreateTopic("agentbroadcast.cluster", 1, 1); err != nil {
return err
}
for i := 1; i < 20; i++ {
if err := k.kafkaCluster.CreateTopic(fmt.Sprintf("sourceevents.%s.kafka%d", k.sourceID, i), 1, 1); err != nil {
return err
}
if err := k.kafkaCluster.CreateTopic(fmt.Sprintf("agentevents.%s.kafka%d", k.sourceID, i), 1, 1); err != nil {
return err
}
}
kafkaOptions := kafka.KafkaOptions{
BootstrapServer: k.bootstrapServer,
}
optionsData, err := yaml.Marshal(kafkaOptions)
if err != nil {
return err
}
if err := os.WriteFile(k.configFile, optionsData, 0600); err != nil {
return err
}
kafkaConfigmap, err := kafka.BuildKafkaOptionsFromFlags(k.configFile)
if err != nil {
return err
}
sourceOptions := kafka.NewSourceOptions(kafkaConfigmap, k.sourceID)
workClientSet, err := starSourceClient(ctx, sourceOptions)
if err != nil {
return err
}
k.workClientSet = workClientSet
return nil
}
func (k *KafkaSource) Stop() error {
k.kafkaCluster.Close()
return nil
}
func (k *KafkaSource) Workclientset() workclientset.Interface {
return k.workClientSet
}
func starSourceClient(ctx context.Context, sourceOptions *sdkoptions.CloudEventsSourceOptions) (workclientset.Interface, error) {
// build a source client
workLister := &manifestWorkLister{}
watcher := watcher.NewManifestWorkWatcher()
cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork](
ctx,
sourceOptions,
mqtt.NewSourceOptions(mqttOptions, fmt.Sprintf("%s-client", sourceID), sourceID),
workLister,
work.ManifestWorkStatusHash,
&ManifestCodec{},
&ManifestBundleCodec{},
)
if err != nil {
return nil, err
return err
}
manifestWorkClient := newManifestWorkSourceClient(cloudEventsClient, watcher)
@@ -232,8 +117,17 @@ func starSourceClient(ctx context.Context, sourceOptions *sdkoptions.CloudEvents
// start the source client
cloudEventsClient.Subscribe(ctx, newManifestWorkStatusHandler(manifestWorkLister, watcher))
m.workClientSet = workClientSet
go informers.Informer().Run(ctx.Done())
return workClientSet, nil
return nil
}
func (m *MQTTSource) Stop() error {
return mqttBroker.Close()
}
func (m *MQTTSource) Workclientset() workclientset.Interface {
return m.workClientSet
}

View File

@@ -1,5 +0,0 @@
package cloudevents
import "github.com/onsi/ginkgo/v2"
var _ = ginkgo.Describe("ManifestWork Status Feedback (Kafka)", runStatusFeedbackTest(kafkaSourceInfo, kClusterNameGenerator.generate))

View File

@@ -1,5 +0,0 @@
package cloudevents
import "github.com/onsi/ginkgo/v2"
var _ = ginkgo.Describe("ManifestWork Status Feedback (MQTT)", runStatusFeedbackTest(mqttSourceInfo, clusterName))

File diff suppressed because it is too large

View File

@@ -9,10 +9,8 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"go.uber.org/zap/zapcore"
"gopkg.in/yaml.v2"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
@@ -27,34 +25,20 @@ import (
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
"open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/pkg/work/helper"
"open-cluster-management.io/ocm/test/integration/cloudevents/source"
"open-cluster-management.io/ocm/test/integration/util"
)
type sourceInfoGetter func() (workclientset.Interface, string, string, string)
type clusterNameGetter func() string
type kafkaClusterNameGenerator struct{ count int }
func (g *kafkaClusterNameGenerator) generate() string {
g.count = g.count + 1
return fmt.Sprintf("kafka%d", g.count)
}
const (
eventuallyTimeout = 60 // seconds
eventuallyInterval = 1 // seconds
cm1, cm2, cm3, cm4 = "cm1", "cm2", "cm3", "cm4"
cm1, cm2 = "cm1", "cm2"
)
// TODO: consider merging this suite with the work integration tests
const (
mqttDriver = "mqtt"
kafkaDriver = "kafka"
)
// focus on the case where the source is an MQTT broker
const workSourceDriver = "mqtt"
var tempDir string
@@ -62,13 +46,10 @@ var testEnv *envtest.Environment
var envCtx context.Context
var envCancel context.CancelFunc
var mqttSource source.Source
var mqttSourceConfigPath string
var mqttSourceWorkClient workclientset.Interface
var kafkaSource source.Source
var kafkaSourceConfigPath string
var kafkaSourceWorkClient workclientset.Interface
var workSource source.Source
var workSourceConfigFileName string
var workSourceWorkClient workclientset.Interface
var workSourceHash string
var mwrsConfigFileName string
@@ -80,8 +61,6 @@ var spokeRestConfig *rest.Config
var spokeKubeClient kubernetes.Interface
var spokeWorkClient workclientset.Interface
var kClusterNameGenerator *kafkaClusterNameGenerator = &kafkaClusterNameGenerator{}
var CRDPaths = []string{
// hub
"./vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml",
@@ -134,37 +113,38 @@ var _ = ginkgo.BeforeSuite(func() {
hubWorkClient, err = workclientset.NewForConfig(cfg)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// create source client with mqtt
mqttSourceConfigPath = path.Join(tempDir, "mqttconfig")
mqttSource = source.NewMQTTSource(mqttSourceConfigPath)
err = mqttSource.Start(envCtx)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
mqttSourceWorkClient = mqttSource.Workclientset()
gomega.Expect(mqttSourceWorkClient).ToNot(gomega.BeNil())
switch workSourceDriver {
case "mqtt":
// create mqttconfig file for source in a tmp dir
workSourceConfigFileName = path.Join(tempDir, "mqttconfig")
// create source client with kafka
kafkaSourceConfigPath = path.Join(tempDir, "kafkaconfig")
kafkaSource = source.NewKafkaSource(kafkaSourceConfigPath)
err = kafkaSource.Start(envCtx)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
kafkaSourceWorkClient = kafkaSource.Workclientset()
gomega.Expect(kafkaSourceWorkClient).ToNot(gomega.BeNil())
workSource = source.NewMQTTSource(workSourceConfigFileName)
err := workSource.Start(envCtx)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// create mqttconfig file for mwrsctrl in a tmp dir
mwrsConfigFileName = path.Join(tempDir, "mwrsctrl-mqttconfig")
config := mqtt.MQTTConfig{
BrokerHost: mqttSource.Host(),
Topics: &types.Topics{
SourceEvents: "sources/mwrsctrl/clusters/+/sourceevents",
AgentEvents: "sources/mwrsctrl/clusters/+/agentevents",
SourceBroadcast: "sources/mwrsctrl/sourcebroadcast",
},
workSourceHash = helper.HubHash(workSource.Host())
workSourceWorkClient = workSource.Workclientset()
gomega.Expect(workSourceWorkClient).ToNot(gomega.BeNil())
// create mqttconfig file for mwrsctrl in a tmp dir
mwrsConfigFileName = path.Join(tempDir, "mwrsctrl-mqttconfig")
config := mqtt.MQTTConfig{
BrokerHost: workSource.Host(),
Topics: &types.Topics{
SourceEvents: "sources/mwrsctrl/clusters/+/sourceevents",
AgentEvents: "sources/mwrsctrl/clusters/+/agentevents",
SourceBroadcast: "sources/mwrsctrl/sourcebroadcast",
},
}
configData, err := yaml.Marshal(config)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = os.WriteFile(mwrsConfigFileName, configData, 0600)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
default:
ginkgo.AbortSuite(fmt.Sprintf("unsupported source driver: %s", workSourceDriver))
}
configData, err := yaml.Marshal(config)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = os.WriteFile(mwrsConfigFileName, configData, 0600)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
var _ = ginkgo.AfterSuite(func() {
@@ -172,10 +152,7 @@ var _ = ginkgo.AfterSuite(func() {
envCancel()
err := mqttSource.Stop()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err = kafkaSource.Stop()
err := workSource.Stop()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err = testEnv.Stop()
@@ -185,29 +162,3 @@ var _ = ginkgo.AfterSuite(func() {
os.RemoveAll(tempDir)
}
})
func runWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *options.AgentOptions) {
agentConfig := spoke.NewWorkAgentConfig(commOption, o)
err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeRestConfig,
EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func mqttSourceInfo() (workclientset.Interface, string, string, string) {
return mqttSourceWorkClient, mqttDriver, mqttSourceConfigPath, mqttSource.Hash()
}
func kafkaSourceInfo() (workclientset.Interface, string, string, string) {
return kafkaSourceWorkClient, kafkaDriver, kafkaSourceConfigPath, kafkaSource.Hash()
}
func clusterName() string {
return utilrand.String(5)
}
func toAppliedManifestWorkName(hash string, work *workapiv1.ManifestWork) string {
// if the source is not kube, the uid will be used as the manifestwork name
return fmt.Sprintf("%s-%s", hash, work.UID)
}

View File

@@ -1,7 +0,0 @@
package cloudevents
import (
"github.com/onsi/ginkgo/v2"
)
var _ = ginkgo.Describe("ManifestWork (Kafka)", runUpdateStrategyTest(kafkaSourceInfo, kClusterNameGenerator.generate))

View File

@@ -1,5 +0,0 @@
package cloudevents
import "github.com/onsi/ginkgo/v2"
var _ = ginkgo.Describe("ManifestWork Update Strategy (MQTT)", runUpdateStrategyTest(mqttSourceInfo, clusterName))

View File

@@ -11,9 +11,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/utils/ptr"
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
@@ -21,436 +21,424 @@ import (
"open-cluster-management.io/ocm/test/integration/util"
)
func runUpdateStrategyTest(sourceInfoGetter sourceInfoGetter, clusterNameGetter clusterNameGetter) func() {
return func() {
var err error
var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var err error
var cancel context.CancelFunc
var cancel context.CancelFunc
var clusterName string
var sourceDriver string
var sourceConfigPath string
var sourceClient workclientset.Interface
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
var clusterName string
ginkgo.BeforeEach(func() {
clusterName = utilrand.String(5)
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
ns := &corev1.Namespace{}
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = workSourceDriver
o.WorkloadSourceConfig = workSourceConfigFileName
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifest"}
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go runWorkAgent(ctx, o, commOptions)
// reset manifests
manifests = nil
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(clusterName, "", manifests)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Create only strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
sourceClient, sourceDriver, sourceConfigPath, _ = sourceInfoGetter()
gomega.Expect(sourceClient).ToNot(gomega.BeNil())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
clusterName = clusterNameGetter()
ns := &corev1.Namespace{}
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
object, _, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigPath
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifest"}
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
go runWorkAgent(ctx, o, commOptions)
// reset manifests
manifests = nil
manifests = append(manifests, util.ToManifest(object))
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(clusterName, "", manifests)
})
ginkgo.AfterEach(func() {
err = spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
if cancel != nil {
cancel()
ginkgo.It("deployed resource should not be updated when work is updated", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeCreateOnly,
},
},
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 1 {
return fmt.Errorf("replicas should not be changed")
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
})
ginkgo.Context("Server side apply strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
ginkgo.Context("Create only strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
ginkgo.It("deployed resource should not be updated when work is updated", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeCreateOnly,
},
ginkgo.It("deployed resource should be applied when work is updated", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
if *deploy.Spec.Replicas != 3 {
return fmt.Errorf("replicas should be updated to 3 but got %d", *deploy.Spec.Replicas)
}
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 1 {
return fmt.Errorf("replicas should not be changed")
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
ginkgo.Context("Server side apply strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
ginkgo.It("deployed resource should be applied when work is updated", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
ginkgo.It("should get conflict if a field is taken by another manager", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update deployment with another field manager
err = unstructured.SetNestedField(object.Object, int64(2), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
patch, err := object.MarshalJSON()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).Patch(
context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: ptr.To[bool](true), FieldManager: "test-integration"})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Update deployment by work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Failed to apply due to conflict
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
// remove the replica field and apply should work
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 3 {
return fmt.Errorf("replicas should be updated to 3 but got %d", *deploy.Spec.Replicas)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("should get conflict if a field is taken by another manager", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update deployment with another field manager
err = unstructured.SetNestedField(object.Object, int64(2), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
patch, err := object.MarshalJSON()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).Patch(
context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: ptr.To[bool](true), FieldManager: "test-integration"})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Update deployment by work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Failed to apply due to conflict
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
// remove the replica field and apply should work
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("two manifest works with different field manager", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Create another work with different fieldmanager
objCopy := object.DeepCopy()
// work1 does not want to own the replicas field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
ServerSideApply: &workapiv1.ServerSideApplyConfig{
Force: true,
FieldManager: "work-agent-another",
},
},
},
}
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Update deployment replica by work should work since this work still owns the replicas field
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// This should work since this work still owns the replicas field
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 3 {
return fmt.Errorf("expected replica is not correct, got %d", *deploy.Spec.Replicas)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Update sa field will not work
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// This should fail since the sa field is also managed by the other work's field manager
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("with delete options", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Create another work with different fieldmanager
objCopy := object.DeepCopy()
// work1 does not want to own the replicas field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
ServerSideApply: &workapiv1.ServerSideApplyConfig{
Force: true,
FieldManager: "work-agent-another",
},
},
},
}
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, sourceClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if len(deploy.OwnerReferences) != 2 {
return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// update deleteOption of the first work
gomega.Eventually(func() error {
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if len(deploy.OwnerReferences) != 1 {
return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
}
}
ginkgo.It("two manifest works with different field manager", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Create another work with different fieldmanager
objCopy := object.DeepCopy()
// work1 does not want to own the replicas field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
ServerSideApply: &workapiv1.ServerSideApplyConfig{
Force: true,
FieldManager: "work-agent-another",
},
},
},
}
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Update deployment replica by work should work since this work still owns the replicas field
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// This should work since this work still owns the replicas field
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 3 {
return fmt.Errorf("expected replica is not correct, got %d", *deploy.Spec.Replicas)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Update sa field will not work
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// This should fail since the sa field is also managed by the other work's field manager
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("with delete options", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Create another work with different fieldmanager
objCopy := object.DeepCopy()
// work1 does not want to own the replicas field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
ServerSideApply: &workapiv1.ServerSideApplyConfig{
Force: true,
FieldManager: "work-agent-another",
},
},
},
}
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if len(deploy.OwnerReferences) != 2 {
return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// update deleteOption of the first work
gomega.Eventually(func() error {
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}
_, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if len(deploy.OwnerReferences) != 1 {
return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
})
})

View File

@@ -1,7 +0,0 @@
package cloudevents
import (
"github.com/onsi/ginkgo/v2"
)
var _ = ginkgo.Describe("ManifestWork (Kafka)", runWorkTest(kafkaSourceInfo, kClusterNameGenerator.generate))

View File

@@ -1,7 +0,0 @@
package cloudevents
import (
"github.com/onsi/ginkgo/v2"
)
var _ = ginkgo.Describe("ManifestWork (MQTT)", runWorkTest(mqttSourceInfo, clusterName))

View File

@@ -7,11 +7,12 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/openshift/library-go/pkg/controller/controllercmd"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilrand "k8s.io/apimachinery/pkg/util/rand"
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
@@ -19,190 +20,222 @@ import (
"open-cluster-management.io/ocm/test/integration/util"
)
func runWorkTest(sourceInfoGetter sourceInfoGetter, clusterNameGetter clusterNameGetter) func() {
return func() {
var err error
func runWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) {
agentConfig := spoke.NewWorkAgentConfig(commOption, o)
err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeRestConfig,
EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
var cancel context.CancelFunc
var _ = ginkgo.Describe("ManifestWork", func() {
var err error
var cancel context.CancelFunc
var sourceDriver string
var sourceConfigPath string
var sourceHash string
var sourceClient workclientset.Interface
var clusterName string
var clusterName string
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
var appliedManifestWorkName string
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
ginkgo.BeforeEach(func() {
clusterName = utilrand.String(5)
ginkgo.BeforeEach(func() {
sourceClient, sourceDriver, sourceConfigPath, sourceHash = sourceInfoGetter()
gomega.Expect(sourceClient).ToNot(gomega.BeNil())
ns := &corev1.Namespace{}
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
clusterName = clusterNameGetter()
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
o.WorkloadSourceDriver = workSourceDriver
o.WorkloadSourceConfig = workSourceConfigFileName
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"}
ns := &corev1.Namespace{}
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
go runWorkAgent(ctx, o, commOptions)
// reset manifests
manifests = nil
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(clusterName, "", manifests)
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// if the source is not kube, the uid will be used as the manifestwork name
appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID)
})
ginkgo.AfterEach(func() {
err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigPath
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"}
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
go runWorkAgent(ctx, o, commOptions)
// reset manifests
manifests = nil
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(clusterName, "", manifests)
work, err = sourceClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.AfterEach(func() {
err = sourceClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
_, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("work %s in namespace %s still exists", work.Name, clusterName)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
err = spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
if cancel != nil {
cancel()
}
})
ginkgo.Context("With a single manifest", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
}
})
ginkgo.It("should create work and then apply it successfully", func() {
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("should update work and then apply it successfully", func() {
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)),
}
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
// check if resource created by stale manifest is deleted once it is removed from applied resource list
gomega.Eventually(func() error {
_, err := sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
return fmt.Errorf("work %s in namespace %s still exists", work.Name, clusterName)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
err = spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == cm1 {
return fmt.Errorf("found applied resource cm1")
}
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval)
err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
if cancel != nil {
cancel()
util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.UID), manifests,
workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
})
ginkgo.Context("With multiple manifests", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap("non-existent-namespace", cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, nil)),
}
})
ginkgo.Context("With a single manifest", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
}
})
ginkgo.It("should create work and then apply it successfully", func() {
util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
ginkgo.It("should create work and then apply it successfully", func() {
assertWorkCreated(sourceClient, work, manifests)
})
ginkgo.It("should update work and then apply it successfully", func() {
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)),
}
assertWorkUpdated(sourceClient, clusterName, toAppliedManifestWorkName(sourceHash, work),
work, manifests, newManifests, cm1)
})
ginkgo.It("should delete work successfully", func() {
assertWorkDeleted(sourceClient, clusterName, toAppliedManifestWorkName(sourceHash, work),
work, manifests)
})
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.Context("With multiple manifests", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)),
}
})
ginkgo.It("should update work and then apply it successfully", func() {
util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
ginkgo.It("should create work and then apply it successfully", func() {
assertWorkCreated(sourceClient, work, manifests)
})
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
ginkgo.It("should update work and then apply it successfully", func() {
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm3, map[string]string{"x": "y"}, nil)),
}
assertWorkUpdated(sourceClient, clusterName, toAppliedManifestWorkName(sourceHash, work),
work, manifests, newManifests, cm2)
})
ginkgo.It("should delete work successfully", func() {
assertWorkDeleted(sourceClient, clusterName, toAppliedManifestWorkName(sourceHash, work),
work, manifests)
})
})
}
}
func assertWorkCreated(sourceClient workclientset.Interface, work *workapiv1.ManifestWork, manifests []workapiv1.Manifest) {
expectedStatus := []metav1.ConditionStatus{}
for range manifests {
expectedStatus = append(expectedStatus, metav1.ConditionTrue)
}
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkApplied,
metav1.ConditionTrue, expectedStatus, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, sourceClient, workapiv1.WorkAvailable,
metav1.ConditionTrue, expectedStatus, eventuallyTimeout, eventuallyInterval)
}
func assertWorkUpdated(sourceClient workclientset.Interface, clusterName, appliedManifestWorkName string,
work *workapiv1.ManifestWork, manifests, newManifests []workapiv1.Manifest, removedCM string) {
assertWorkCreated(sourceClient, work, manifests)
work, err := sourceClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
_, err = sourceClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
// check if resource created by stale manifest is deleted once it is removed from applied resource list
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == removedCM {
return fmt.Errorf("found applied resource %s", removedCM)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"e": "f"}, nil)),
}
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), removedCM, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
}
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
func assertWorkDeleted(sourceClient workclientset.Interface, clusterName, appliedManifestWorkName string,
work *workapiv1.ManifestWork, manifests []workapiv1.Manifest) {
util.AssertFinalizerAdded(work.Namespace, work.Name, sourceClient, eventuallyTimeout, eventuallyInterval)
// check if Available status is updated or not
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
err := sourceClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// check if resource created by stale manifest is deleted once it is removed from applied resource list
gomega.Eventually(func() error {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
util.AssertWorkDeleted(work.Namespace, work.Name, appliedManifestWorkName, manifests,
sourceClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
}
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
if appliedResource.Name == "cm3" {
return fmt.Errorf("found appled resource cm3")
}
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval)
err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.Name), manifests,
workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
})
})

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,156 +0,0 @@
/*
Copyright 2023 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package kafka_confluent
import (
"bytes"
"context"
"strconv"
"strings"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/format"
"github.com/cloudevents/sdk-go/v2/binding/spec"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)
const (
prefix = "ce-"
contentTypeKey = "content-type"
)
const (
KafkaOffsetKey = "kafkaoffset"
KafkaPartitionKey = "kafkapartition"
KafkaTopicKey = "kafkatopic"
KafkaMessageKey = "kafkamessagekey"
)
var specs = spec.WithPrefix(prefix)
// Message represents a Kafka message.
// This message *can* be read several times safely
type Message struct {
internal *kafka.Message
properties map[string][]byte
format format.Format
version spec.Version
}
// Check if Message implements binding.Message
var (
_ binding.Message = (*Message)(nil)
_ binding.MessageMetadataReader = (*Message)(nil)
)
// NewMessage returns a binding.Message that holds the provided kafka.Message.
// The returned binding.Message *can* be read several times safely
// This function *doesn't* guarantee that the returned binding.Message is always a kafka_sarama.Message instance
func NewMessage(msg *kafka.Message) *Message {
if msg == nil {
panic("the kafka.Message shouldn't be nil")
}
if msg.TopicPartition.Topic == nil {
panic("the topic of kafka.Message shouldn't be nil")
}
if msg.TopicPartition.Partition < 0 || msg.TopicPartition.Offset < 0 {
panic("the partition or offset of the kafka.Message must be non-negative")
}
var contentType, contentVersion string
properties := make(map[string][]byte, len(msg.Headers)+3)
for _, header := range msg.Headers {
k := strings.ToLower(string(header.Key))
if k == strings.ToLower(contentTypeKey) {
contentType = string(header.Value)
}
if k == specs.PrefixedSpecVersionName() {
contentVersion = string(header.Value)
}
properties[k] = header.Value
}
// add the kafka offset, partition, topic and message key to the properties
properties[prefix+KafkaOffsetKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Offset), 10))
properties[prefix+KafkaPartitionKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Partition), 10))
properties[prefix+KafkaTopicKey] = []byte(*msg.TopicPartition.Topic)
if msg.Key != nil {
properties[prefix+KafkaMessageKey] = msg.Key
}
message := &Message{
internal: msg,
properties: properties,
}
if ft := format.Lookup(contentType); ft != nil {
message.format = ft
} else if v := specs.Version(contentVersion); v != nil {
message.version = v
}
return message
}
func (m *Message) ReadEncoding() binding.Encoding {
if m.version != nil {
return binding.EncodingBinary
}
if m.format != nil {
return binding.EncodingStructured
}
return binding.EncodingUnknown
}
func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error {
if m.format != nil {
return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Value))
}
return binding.ErrNotStructured
}
func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error {
if m.version == nil {
return binding.ErrNotBinary
}
var err error
for k, v := range m.properties {
if strings.HasPrefix(k, prefix) {
attr := m.version.Attribute(k)
if attr != nil {
err = encoder.SetAttribute(attr, string(v))
} else {
err = encoder.SetExtension(strings.TrimPrefix(k, prefix), string(v))
}
} else if k == strings.ToLower(contentTypeKey) {
err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(v))
}
if err != nil {
return err
}
}
if m.internal.Value != nil {
err = encoder.SetData(bytes.NewBuffer(m.internal.Value))
}
return err
}
func (m *Message) Finish(error) error {
return nil
}
func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) {
attr := m.version.AttributeFromKind(k)
if attr == nil {
return nil, nil
}
return attr, m.properties[attr.PrefixedName()]
}
func (m *Message) GetExtension(name string) interface{} {
return m.properties[prefix+name]
}
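
For context only (this example is not part of the commit, and the broker address, group id, and topic below are placeholders), a minimal sketch of how a polled *kafka.Message could be decoded into a CloudEvent through the Message wrapper above:

```
// Illustrative only: wrap a consumed *kafka.Message and decode it with binding.ToEvent.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/cloudevents/sdk-go/v2/binding"
	kafka_confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2"
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	consumer, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092", // placeholder
		"group.id":          "example-group",  // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	if err := consumer.SubscribeTopics([]string{"events"}, nil); err != nil { // placeholder topic
		log.Fatal(err)
	}

	msg, err := consumer.ReadMessage(-1) // block until a message arrives
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the Kafka message and decode it into a CloudEvent.
	evt, err := binding.ToEvent(context.Background(), kafka_confluent.NewMessage(msg))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(evt)
}
```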

View File

@@ -1,151 +0,0 @@
/*
Copyright 2023 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package kafka_confluent
import (
"context"
"errors"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)
// Option is the function signature required to be considered a kafka_confluent.Option.
type Option func(*Protocol) error
// WithConfigMap sets the configMap to init the kafka client.
func WithConfigMap(config *kafka.ConfigMap) Option {
return func(p *Protocol) error {
if config == nil {
return errors.New("the kafka.ConfigMap option must not be nil")
}
p.kafkaConfigMap = config
return nil
}
}
// WithSenderTopic sets the defaultTopic for the kafka.Producer.
func WithSenderTopic(defaultTopic string) Option {
return func(p *Protocol) error {
if defaultTopic == "" {
return errors.New("the producer topic option must not be nil")
}
p.producerDefaultTopic = defaultTopic
return nil
}
}
// WithReceiverTopics sets the topics for the kafka.Consumer.
func WithReceiverTopics(topics []string) Option {
return func(p *Protocol) error {
if topics == nil {
return errors.New("the consumer topics option must not be nil")
}
p.consumerTopics = topics
return nil
}
}
// WithRebalanceCallBack sets the callback for rebalancing of the consumer group.
func WithRebalanceCallBack(rebalanceCb kafka.RebalanceCb) Option {
return func(p *Protocol) error {
if rebalanceCb == nil {
return errors.New("the consumer group rebalance callback must not be nil")
}
p.consumerRebalanceCb = rebalanceCb
return nil
}
}
// WithPollTimeout sets the timeout of the consumer polling for messages or events; the poll returns nil on timeout.
func WithPollTimeout(timeoutMs int) Option {
return func(p *Protocol) error {
p.consumerPollTimeout = timeoutMs
return nil
}
}
// WithSender sets a kafka.Producer instance to init the client directly.
func WithSender(producer *kafka.Producer) Option {
return func(p *Protocol) error {
if producer == nil {
return errors.New("the producer option must not be nil")
}
p.producer = producer
return nil
}
}
// WithErrorHandler provides a func to handle the kafka.Error values polled by the kafka.Consumer.
func WithErrorHandler(handler func(ctx context.Context, err kafka.Error)) Option {
return func(p *Protocol) error {
p.consumerErrorHandler = handler
return nil
}
}
// WithReceiver sets a kafka.Consumer instance to init the client directly.
func WithReceiver(consumer *kafka.Consumer) Option {
return func(p *Protocol) error {
if consumer == nil {
return errors.New("the consumer option must not be nil")
}
p.consumer = consumer
return nil
}
}
// Opaque key type used to store topicPartitionOffsets: assign them from ctx.
type topicPartitionOffsetsType struct{}
var offsetKey = topicPartitionOffsetsType{}
// WithTopicPartitionOffsets will set the positions where the consumer starts consuming from.
func WithTopicPartitionOffsets(ctx context.Context, topicPartitionOffsets []kafka.TopicPartition) context.Context {
if len(topicPartitionOffsets) == 0 {
panic("the topicPartitionOffsets cannot be empty")
}
for _, offset := range topicPartitionOffsets {
if offset.Topic == nil || *(offset.Topic) == "" {
panic("the kafka topic cannot be nil or empty")
}
if offset.Partition < 0 || offset.Offset < 0 {
panic("the kafka partition/offset must be non-negative")
}
}
return context.WithValue(ctx, offsetKey, topicPartitionOffsets)
}
// TopicPartitionOffsetsFrom looks in the given context and returns []kafka.TopicPartition or nil if not set
func TopicPartitionOffsetsFrom(ctx context.Context) []kafka.TopicPartition {
c := ctx.Value(offsetKey)
if c != nil {
if s, ok := c.([]kafka.TopicPartition); ok {
return s
}
}
return nil
}
// Opaque key type used to store message key
type messageKeyType struct{}
var keyForMessageKey = messageKeyType{}
// WithMessageKey returns back a new context with the given messageKey.
func WithMessageKey(ctx context.Context, messageKey string) context.Context {
return context.WithValue(ctx, keyForMessageKey, messageKey)
}
// MessageKeyFrom looks in the given context and returns `messageKey` as a string if found and valid, otherwise "".
func MessageKeyFrom(ctx context.Context) string {
c := ctx.Value(keyForMessageKey)
if c != nil {
if s, ok := c.(string); ok {
return s
}
}
return ""
}

View File

@@ -1,247 +0,0 @@
/*
Copyright 2023 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package kafka_confluent
import (
"context"
"errors"
"fmt"
"io"
"sync"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/protocol"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
cecontext "github.com/cloudevents/sdk-go/v2/context"
)
var (
_ protocol.Sender = (*Protocol)(nil)
_ protocol.Opener = (*Protocol)(nil)
_ protocol.Receiver = (*Protocol)(nil)
_ protocol.Closer = (*Protocol)(nil)
)
type Protocol struct {
kafkaConfigMap *kafka.ConfigMap
consumer *kafka.Consumer
consumerTopics []string
consumerRebalanceCb kafka.RebalanceCb // optional
consumerPollTimeout int // optional
consumerErrorHandler func(ctx context.Context, err kafka.Error) // optional
consumerMux sync.Mutex
consumerIncoming chan *kafka.Message
consumerCtx context.Context
consumerCancel context.CancelFunc
producer *kafka.Producer
producerDefaultTopic string // optional
closerMux sync.Mutex
}
func New(opts ...Option) (*Protocol, error) {
p := &Protocol{
consumerPollTimeout: 100,
consumerIncoming: make(chan *kafka.Message),
}
if err := p.applyOptions(opts...); err != nil {
return nil, err
}
if p.kafkaConfigMap != nil {
if p.consumerTopics != nil && p.consumer == nil {
consumer, err := kafka.NewConsumer(p.kafkaConfigMap)
if err != nil {
return nil, err
}
p.consumer = consumer
}
if p.producerDefaultTopic != "" && p.producer == nil {
producer, err := kafka.NewProducer(p.kafkaConfigMap)
if err != nil {
return nil, err
}
p.producer = producer
}
if p.producer == nil && p.consumer == nil {
return nil, errors.New("at least receiver or sender topic must be set")
}
}
if p.producerDefaultTopic != "" && p.producer == nil {
return nil, fmt.Errorf("at least configmap or producer must be set for the sender topic: %s", p.producerDefaultTopic)
}
if len(p.consumerTopics) > 0 && p.consumer == nil {
return nil, fmt.Errorf("at least configmap or consumer must be set for the receiver topics: %s", p.consumerTopics)
}
if p.kafkaConfigMap == nil && p.producer == nil && p.consumer == nil {
return nil, errors.New("at least one of the following to initialize the protocol must be set: config, producer, or consumer")
}
return p, nil
}
// Events returns the events channel used by Confluent Kafka to deliver the result from a produce, i.e., send, operation.
// When using this SDK to produce (send) messages, this channel must be monitored to avoid resource leaks and to keep the channel from filling up. See the Confluent SDK for Go for implementation details.
func (p *Protocol) Events() (chan kafka.Event, error) {
if p.producer == nil {
return nil, errors.New("producer not set")
}
return p.producer.Events(), nil
}
func (p *Protocol) applyOptions(opts ...Option) error {
for _, fn := range opts {
if err := fn(p); err != nil {
return err
}
}
return nil
}
// Send message by kafka.Producer. You must monitor the Events() channel when using this function.
func (p *Protocol) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) {
if p.producer == nil {
return errors.New("producer client must be set")
}
p.closerMux.Lock()
defer p.closerMux.Unlock()
if p.producer.IsClosed() {
return errors.New("producer is closed")
}
defer in.Finish(err)
kafkaMsg := &kafka.Message{
TopicPartition: kafka.TopicPartition{
Topic: &p.producerDefaultTopic,
Partition: kafka.PartitionAny,
},
}
if topic := cecontext.TopicFrom(ctx); topic != "" {
kafkaMsg.TopicPartition.Topic = &topic
}
if messageKey := MessageKeyFrom(ctx); messageKey != "" {
kafkaMsg.Key = []byte(messageKey)
}
if err = WriteProducerMessage(ctx, in, kafkaMsg, transformers...); err != nil {
return fmt.Errorf("create producer message: %w", err)
}
if err = p.producer.Produce(kafkaMsg, nil); err != nil {
return fmt.Errorf("produce message: %w", err)
}
return nil
}
func (p *Protocol) OpenInbound(ctx context.Context) error {
if p.consumer == nil {
return errors.New("the consumer client must be set")
}
if p.consumerTopics == nil {
return errors.New("the consumer topics must be set")
}
p.consumerMux.Lock()
defer p.consumerMux.Unlock()
logger := cecontext.LoggerFrom(ctx)
// If explicit partition offsets were provided via the context, assign them to the consumer
if positions := TopicPartitionOffsetsFrom(ctx); positions != nil {
if err := p.consumer.Assign(positions); err != nil {
return err
}
}
logger.Infof("Subscribing to topics: %v", p.consumerTopics)
err := p.consumer.SubscribeTopics(p.consumerTopics, p.consumerRebalanceCb)
if err != nil {
return err
}
p.closerMux.Lock()
p.consumerCtx, p.consumerCancel = context.WithCancel(ctx)
defer p.consumerCancel()
p.closerMux.Unlock()
defer func() {
if !p.consumer.IsClosed() {
logger.Infof("Closing consumer %v", p.consumerTopics)
if err = p.consumer.Close(); err != nil {
logger.Errorf("failed to close the consumer: %v", err)
}
}
close(p.consumerIncoming)
}()
for {
select {
case <-p.consumerCtx.Done():
return p.consumerCtx.Err()
default:
ev := p.consumer.Poll(p.consumerPollTimeout)
if ev == nil {
continue
}
switch e := ev.(type) {
case *kafka.Message:
p.consumerIncoming <- e
case kafka.Error:
// Errors should generally be considered informational; the client will try to recover automatically.
// Here, however, we choose to terminate the application if all brokers are down.
logger.Infof("Error %v: %v", e.Code(), e)
if p.consumerErrorHandler != nil {
p.consumerErrorHandler(ctx, e)
}
if e.Code() == kafka.ErrAllBrokersDown {
logger.Error("All broker connections are down")
return e
}
}
}
}
}
// Receive implements Receiver.Receive
func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) {
select {
case m, ok := <-p.consumerIncoming:
if !ok {
return nil, io.EOF
}
msg := NewMessage(m)
return msg, nil
case <-ctx.Done():
return nil, io.EOF
}
}
// Close cleans up resources after use. Must be called to properly close underlying Kafka resources and avoid resource leaks
func (p *Protocol) Close(ctx context.Context) error {
p.closerMux.Lock()
defer p.closerMux.Unlock()
logger := cecontext.LoggerFrom(ctx)
if p.consumerCancel != nil {
p.consumerCancel()
}
if p.producer != nil && !p.producer.IsClosed() {
// Flush and close the producer with a 10 second timeout (closes the Events channel)
for p.producer.Flush(10000) > 0 {
logger.Info("Flushing outstanding messages")
}
p.producer.Close()
}
return nil
}
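
For orientation only (not part of this commit; the broker, topic, group id, and message key are placeholder values), a minimal sketch of wiring this protocol into a CloudEvents client, including draining the producer's Events() channel as the doc comment above requires:

```
// Illustrative sketch: create the protocol, drain delivery reports, and send one event.
package main

import (
	"context"
	"log"

	cloudevents "github.com/cloudevents/sdk-go/v2"
	kafka_confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2"
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	p, err := kafka_confluent.New(
		kafka_confluent.WithConfigMap(&kafka.ConfigMap{
			"bootstrap.servers": "localhost:9092", // placeholder
			"group.id":          "example-group",  // placeholder
		}),
		kafka_confluent.WithSenderTopic("events"),            // placeholder topic
		kafka_confluent.WithReceiverTopics([]string{"events"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close(context.Background())

	// Delivery reports must be drained to avoid the Events channel filling up.
	events, err := p.Events()
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for range events {
			// Inspect delivery reports / errors here as needed.
		}
	}()

	c, err := cloudevents.NewClient(p)
	if err != nil {
		log.Fatal(err)
	}

	evt := cloudevents.NewEvent()
	evt.SetID("example-id")
	evt.SetType("io.example.created")
	evt.SetSource("example/source")
	_ = evt.SetData(cloudevents.ApplicationJSON, map[string]string{"hello": "world"})

	// Attach a Kafka message key for this send via the context option defined above.
	ctx := kafka_confluent.WithMessageKey(context.Background(), "order-42") // placeholder key
	if result := c.Send(ctx, evt); cloudevents.IsUndelivered(result) {
		log.Printf("send failed: %v", result)
	}
}
```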

View File

@@ -1,125 +0,0 @@
/*
Copyright 2023 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package kafka_confluent
import (
"bytes"
"context"
"io"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/format"
"github.com/cloudevents/sdk-go/v2/binding/spec"
"github.com/cloudevents/sdk-go/v2/types"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)
// kafkaMessageWriter extends the kafka.Message to implement the interfaces for converting it to a binding.Message
type kafkaMessageWriter kafka.Message
var (
_ binding.StructuredWriter = (*kafkaMessageWriter)(nil)
_ binding.BinaryWriter = (*kafkaMessageWriter)(nil)
)
// WriteProducerMessage fills the provided kafkaMsg with the message in.
// Using context you can tweak the encoding processing (more details in the binding.Write documentation).
func WriteProducerMessage(ctx context.Context, in binding.Message, kafkaMsg *kafka.Message,
transformers ...binding.Transformer,
) error {
structuredWriter := (*kafkaMessageWriter)(kafkaMsg)
binaryWriter := (*kafkaMessageWriter)(kafkaMsg)
_, err := binding.Write(
ctx,
in,
structuredWriter,
binaryWriter,
transformers...,
)
return err
}
func (b *kafkaMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error {
b.Headers = []kafka.Header{{
Key: contentTypeKey,
Value: []byte(f.MediaType()),
}}
var buf bytes.Buffer
_, err := io.Copy(&buf, event)
if err != nil {
return err
}
b.Value = buf.Bytes()
return nil
}
func (b *kafkaMessageWriter) Start(ctx context.Context) error {
b.Headers = []kafka.Header{}
return nil
}
func (b *kafkaMessageWriter) End(ctx context.Context) error {
return nil
}
func (b *kafkaMessageWriter) SetData(reader io.Reader) error {
buf, ok := reader.(*bytes.Buffer)
if !ok {
buf = new(bytes.Buffer)
_, err := io.Copy(buf, reader)
if err != nil {
return err
}
}
b.Value = buf.Bytes()
return nil
}
func (b *kafkaMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error {
if attribute.Kind() == spec.DataContentType {
if value == nil {
b.removeProperty(contentTypeKey)
return nil
}
b.addProperty(contentTypeKey, value)
} else {
key := prefix + attribute.Name()
if value == nil {
b.removeProperty(key)
return nil
}
b.addProperty(key, value)
}
return nil
}
func (b *kafkaMessageWriter) SetExtension(name string, value interface{}) error {
if value == nil {
b.removeProperty(prefix + name)
return nil
}
return b.addProperty(prefix+name, value)
}
func (b *kafkaMessageWriter) removeProperty(key string) {
for i, v := range b.Headers {
if v.Key == key {
b.Headers = append(b.Headers[:i], b.Headers[i+1:]...)
break
}
}
}
func (b *kafkaMessageWriter) addProperty(key string, value interface{}) error {
s, err := types.Format(value)
if err != nil {
return err
}
b.Headers = append(b.Headers, kafka.Header{Key: key, Value: []byte(s)})
return nil
}

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,2 +0,0 @@
testconf.json
go_rdkafka_generr/go_rdkafka_generr

View File

@@ -1,58 +0,0 @@
/**
* Copyright 2016-2019 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
)
/*
#include "select_rdkafka.h"
//Minimum required librdkafka version. This is checked both during
//build-time and runtime.
//Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error
//defines and strings in sync.
//
#define MIN_RD_KAFKA_VERSION 0x02030000
#ifdef __APPLE__
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
#ifdef __APPLE__
#error "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#error "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#endif
*/
import "C"
func versionCheck() error {
ver, verstr := LibraryVersion()
if ver < C.MIN_RD_KAFKA_VERSION {
return newErrorFromString(ErrNotImplemented,
fmt.Sprintf("%s: librdkafka version %s (0x%x) detected",
C.MIN_VER_ERRSTR, verstr, ver))
}
return nil
}

View File

@@ -1,159 +0,0 @@
# Information for confluent-kafka-go developers
## Development process
1. Use go1.19 (and related tooling) for development on confluent-kafka-go.
2. Make sure to run `gofmt` and `go vet` on your code.
3. While there is no hard-limit, try to keep your line length under 80
characters.
4. [Test](#testing) your changes and create a PR.
NOTE: Whenever librdkafka error codes are updated make sure to run generate
before building:
```
$ make -f mk/Makefile generr
$ go build ./...
```
## Testing
Some of the tests included in this directory, the benchmark and integration tests in particular,
require an existing Kafka cluster and a testconf.json configuration file to
provide tests with bootstrap brokers, topic name, etc.
The format of testconf.json is a JSON object:
```
{
"Brokers": "<bootstrap-brokers>",
"Topic": "<test-topic-name>"
}
```
See testconf-example.json for an example and full set of available options.
To run unit-tests:
```
$ go test
```
To run benchmark tests:
```
$ go test -bench .
```
For the code coverage:
```
$ go test -coverprofile=coverage.out -bench=.
$ go tool cover -func=coverage.out
```
## Build tags
Different build types are supported through Go build tags (`-tags ..`);
these tags should be specified on the **application** build/get/install command
(see the example commands after this list).
* By default the bundled platform-specific static build of librdkafka will
be used. This works out of the box on Mac OSX and glibc-based Linux distros,
such as Ubuntu and CentOS.
* `-tags musl` - must be specified when building on/for musl-based Linux
distros, such as Alpine. Will use the bundled static musl build of
librdkafka.
* `-tags dynamic` - link librdkafka dynamically. A shared librdkafka library
must be installed manually through other means (apt-get, yum, build from
source, etc).
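For example (illustrative commands only, assuming an application that imports this package):
```
$ go build ./...                # default: bundled static librdkafka
$ go build -tags musl ./...     # on musl-based distros such as Alpine
$ go build -tags dynamic ./...  # dynamically link a system-installed librdkafka
```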
## Release process
For each release candidate and final release, perform the following steps:
### Review the CHANGELOG
### Update bundle to latest librdkafka
See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md).
### Update librdkafka version requirement
Update the minimum required librdkafka version in `kafka/00version.go`
and `README.md` and the version in `examples/go.mod` and `mk/doc-gen.py`.
### Update error codes
Error codes can be automatically generated from the current librdkafka version.
Update generated error codes:
$ make -f mk/Makefile generr
# Verify by building
## Generating HTML documentation
To generate one-page HTML documentation run the mk/doc-gen.py script from the
top-level directory. This script requires the beautifulsoup4 Python package.
```
$ source .../your/virtualenv/bin/activate
$ pip install beautifulsoup4
...
$ make -f mk/Makefile docs
```
### Rebuild everything
$ go clean -i ./...
$ go build ./...
### Run full test suite
Set up a test cluster using whatever mechanism you typically use
(docker, trivup, ccloud, ..).
Make sure to update `kafka/testconf.json` as needed (broker list, $BROKERS)
Run test suite:
$ go test ./...
### Verify examples
Manually verify that the examples/ applications work.
Also make sure the examples in README.md work.
### Commit any changes
Make sure to push to github before creating the tag to have CI tests pass.
### Create and push tag
$ git tag v1.3.0
$ git push --dry-run origin v1.3.0
# Remove --dry-run and re-execute if it looks ok.
### Create release notes page on github
### Update version in Confluent docs
Put the new version in settings.sh of these two repos
https://github.com/confluentinc/docs
https://github.com/confluentinc/docs-platform
### Don't forget tweeting it!

File diff suppressed because it is too large Load Diff

View File

@@ -1,587 +0,0 @@
/**
* Copyright 2018 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
"time"
"unsafe"
)
/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"
// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value 0 will return immediately after triggering topic
// creation, while > 0 will wait this long for topic creation to propagate
// in cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
type AdminOptionOperationTimeout struct {
isSet bool
val time.Duration
}
func (ao AdminOptionOperationTimeout) supportsCreateTopics() {
}
func (ao AdminOptionOperationTimeout) supportsDeleteTopics() {
}
func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {
}
func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
}
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
cOptions, C.int(durationToMilliseconds(ao.val)),
cErrstr, cErrstrSize)
if cErr != 0 {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newCErrorFromString(cErr,
fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
}
return nil
}
// SetAdminOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value 0 will return immediately after triggering topic
// creation, while > 0 will wait this long for topic creation to propagate
// in cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) {
ao.isSet = true
ao.val = t
return ao
}
// AdminOptionRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
type AdminOptionRequestTimeout struct {
isSet bool
val time.Duration
}
func (ao AdminOptionRequestTimeout) supportsCreateTopics() {
}
func (ao AdminOptionRequestTimeout) supportsDeleteTopics() {
}
func (ao AdminOptionRequestTimeout) supportsCreatePartitions() {
}
func (ao AdminOptionRequestTimeout) supportsAlterConfigs() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() {
}
func (ao AdminOptionRequestTimeout) supportsCreateACLs() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeACLs() {
}
func (ao AdminOptionRequestTimeout) supportsDeleteACLs() {
}
func (ao AdminOptionRequestTimeout) supportsListConsumerGroups() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeConsumerGroups() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeTopics() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeCluster() {
}
func (ao AdminOptionRequestTimeout) supportsDeleteConsumerGroups() {
}
func (ao AdminOptionRequestTimeout) supportsListConsumerGroupOffsets() {
}
func (ao AdminOptionRequestTimeout) supportsAlterConsumerGroupOffsets() {
}
func (ao AdminOptionRequestTimeout) supportsListOffsets() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeUserScramCredentials() {
}
func (ao AdminOptionRequestTimeout) supportsAlterUserScramCredentials() {
}
func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
}
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cErr := C.rd_kafka_AdminOptions_set_request_timeout(
cOptions, C.int(durationToMilliseconds(ao.val)),
cErrstr, cErrstrSize)
if cErr != 0 {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newCErrorFromString(cErr,
fmt.Sprintf("%s", C.GoString(cErrstr)))
}
return nil
}
// SetAdminRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) {
ao.isSet = true
ao.val = t
return ao
}
// IsolationLevel is a type which is used for AdminOptions to set the IsolationLevel.
type IsolationLevel int
const (
// IsolationLevelReadUncommitted - read uncommitted isolation level
IsolationLevelReadUncommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED)
// IsolationLevelReadCommitted - read committed isolation level
IsolationLevelReadCommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED)
)
// AdminOptionIsolationLevel sets the overall request IsolationLevel.
//
// Default: `ReadUncommitted`.
//
// Valid for ListOffsets.
type AdminOptionIsolationLevel struct {
isSet bool
val IsolationLevel
}
func (ao AdminOptionIsolationLevel) supportsListOffsets() {
}
func (ao AdminOptionIsolationLevel) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
}
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cError := C.rd_kafka_AdminOptions_set_isolation_level(
cOptions, C.rd_kafka_IsolationLevel_t(ao.val))
if cError != nil {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newErrorFromCErrorDestroy(cError)
}
return nil
}
// SetAdminIsolationLevel sets the overall IsolationLevel for a request.
//
// Default: `ReadUncommitted`.
//
// Valid for ListOffsets.
func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel) {
ao.isSet = true
ao.val = isolationLevel
return ao
}
// AdminOptionValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, CreatePartitions, AlterConfigs
type AdminOptionValidateOnly struct {
isSet bool
val bool
}
func (ao AdminOptionValidateOnly) supportsCreateTopics() {
}
func (ao AdminOptionValidateOnly) supportsCreatePartitions() {
}
func (ao AdminOptionValidateOnly) supportsAlterConfigs() {
}
func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
}
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cErr := C.rd_kafka_AdminOptions_set_validate_only(
cOptions, bool2cint(ao.val),
cErrstr, cErrstrSize)
if cErr != 0 {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newCErrorFromString(cErr,
fmt.Sprintf("%s", C.GoString(cErrstr)))
}
return nil
}
// SetAdminValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) {
ao.isSet = true
ao.val = validateOnly
return ao
}
// AdminOptionRequireStableOffsets decides if the broker should return stable
// offsets (transaction-committed).
//
// Default: false
//
// Valid for ListConsumerGroupOffsets.
type AdminOptionRequireStableOffsets struct {
isSet bool
val bool
}
func (ao AdminOptionRequireStableOffsets) supportsListConsumerGroupOffsets() {
}
func (ao AdminOptionRequireStableOffsets) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
}
cError := C.rd_kafka_AdminOptions_set_require_stable_offsets(
cOptions, bool2cint(ao.val))
if cError != nil {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newErrorFromCErrorDestroy(cError)
}
return nil
}
// SetAdminRequireStableOffsets decides if the broker should return stable
// offsets (transaction-committed).
//
// Default: false
//
// Valid for ListConsumerGroupOffsets.
func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets) {
ao.isSet = true
ao.val = val
return ao
}
// AdminOptionMatchConsumerGroupStates decides groups in which state(s) should be
// listed.
//
// Default: nil (lists groups in all states).
//
// Valid for ListConsumerGroups.
type AdminOptionMatchConsumerGroupStates struct {
isSet bool
val []ConsumerGroupState
}
func (ao AdminOptionMatchConsumerGroupStates) supportsListConsumerGroups() {
}
func (ao AdminOptionMatchConsumerGroupStates) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet || ao.val == nil {
return nil
}
// Convert states from Go slice to C pointer.
cStates := make([]C.rd_kafka_consumer_group_state_t, len(ao.val))
cStatesCount := C.size_t(len(ao.val))
for idx, state := range ao.val {
cStates[idx] = C.rd_kafka_consumer_group_state_t(state)
}
cStatesPtr := ((*C.rd_kafka_consumer_group_state_t)(&cStates[0]))
cError := C.rd_kafka_AdminOptions_set_match_consumer_group_states(
cOptions, cStatesPtr, cStatesCount)
if cError != nil {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newErrorFromCErrorDestroy(cError)
}
return nil
}
// AdminOptionIncludeAuthorizedOperations decides if the broker should return
// authorized operations.
//
// Default: false
//
// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster.
type AdminOptionIncludeAuthorizedOperations struct {
isSet bool
val bool
}
func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeConsumerGroups() {
}
func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeTopics() {
}
func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeCluster() {
}
func (ao AdminOptionIncludeAuthorizedOperations) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
}
cError := C.rd_kafka_AdminOptions_set_include_authorized_operations(
cOptions, bool2cint(ao.val))
if cError != nil {
C.rd_kafka_AdminOptions_destroy(cOptions)
return newErrorFromCErrorDestroy(cError)
}
return nil
}
// SetAdminOptionIncludeAuthorizedOperations decides if the broker should return
// authorized operations.
//
// Default: false
//
// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster.
func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations) {
ao.isSet = true
ao.val = val
return ao
}
// SetAdminMatchConsumerGroupStates decides groups in which state(s) should be
// listed.
//
// Default: nil (lists groups in all states).
//
// Valid for ListConsumerGroups.
func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates) {
ao.isSet = true
ao.val = val
return ao
}
// CreateTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreateTopicsAdminOption interface {
supportsCreateTopics()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DeleteTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout.
type DeleteTopicsAdminOption interface {
supportsDeleteTopics()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// CreatePartitionsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreatePartitionsAdminOption interface {
supportsCreatePartitions()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// AlterConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental.
type AlterConfigsAdminOption interface {
supportsAlterConfigs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout.
type DescribeConfigsAdminOption interface {
supportsDescribeConfigs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// CreateACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout
type CreateACLsAdminOption interface {
supportsCreateACLs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout
type DescribeACLsAdminOption interface {
supportsDescribeACLs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DeleteACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout
type DeleteACLsAdminOption interface {
supportsDeleteACLs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// ListConsumerGroupsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminMatchConsumerGroupStates.
type ListConsumerGroupsAdminOption interface {
supportsListConsumerGroups()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeConsumerGroupsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
type DescribeConsumerGroupsAdminOption interface {
supportsDescribeConsumerGroups()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeTopicsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
type DescribeTopicsAdminOption interface {
supportsDescribeTopics()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeClusterAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
type DescribeClusterAdminOption interface {
supportsDescribeCluster()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DeleteConsumerGroupsAdminOption - see setters.
//
// See SetAdminRequestTimeout.
type DeleteConsumerGroupsAdminOption interface {
supportsDeleteConsumerGroups()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// ListConsumerGroupOffsetsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminRequireStableOffsets.
type ListConsumerGroupOffsetsAdminOption interface {
supportsListConsumerGroupOffsets()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// AlterConsumerGroupOffsetsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type AlterConsumerGroupOffsetsAdminOption interface {
supportsAlterConsumerGroupOffsets()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeUserScramCredentialsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type DescribeUserScramCredentialsAdminOption interface {
supportsDescribeUserScramCredentials()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// AlterUserScramCredentialsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type AlterUserScramCredentialsAdminOption interface {
supportsAlterUserScramCredentials()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// ListOffsetsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminIsolationLevel.
type ListOffsetsAdminOption interface {
supportsListOffsets()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// AdminOption is a generic type not to be used directly.
//
// See CreateTopicsAdminOption et.al.
type AdminOption interface {
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
for _, opt := range options {
if opt == nil {
continue
}
err := opt.apply(cOptions)
if err != nil {
return nil, err
}
}
return cOptions, nil
}
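// Illustrative usage sketch (not part of the original file): adminOptionsSetup
// applies every option passed to an Admin API call, so several options can be
// combined on one request. The topic name is a placeholder; assumes "context",
// "time", "fmt" and the kafka import.
func createTopicValidated(ctx context.Context, admin *kafka.AdminClient) error {
    results, err := admin.CreateTopics(ctx,
        []kafka.TopicSpecification{{
            Topic:             "example-topic",
            NumPartitions:     3,
            ReplicationFactor: 1,
        }},
        kafka.SetAdminRequestTimeout(10*time.Second),
        kafka.SetAdminValidateOnly(true)) // dry run: broker validates but creates nothing
    if err != nil {
        return err
    }
    for _, r := range results {
        fmt.Println(r.Topic, r.Error)
    }
    return nil
}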

File diff suppressed because it is too large

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_amd64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_arm64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,10 +0,0 @@
//go:build dynamic
// +build dynamic
package kafka
// #cgo pkg-config: rdkafka
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "dynamically linked to librdkafka"

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// +build !musl
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_amd64.a -lm -ldl -lpthread -lrt
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// +build !musl
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_arm64.a -lm -ldl -lpthread -lrt
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux_arm64 from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// +build musl
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_amd64.a -lm -ldl -lpthread -lrt -lpthread -lrt
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// +build musl
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_arm64.a -lm -ldl -lpthread -lrt -lpthread -lrt
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux_arm64 from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,13 +0,0 @@
// +build !dynamic
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v2.3.0.tgz"

View File

@@ -1,299 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
"go/types"
"reflect"
"strings"
"unsafe"
)
/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"
// ConfigValue supports the following types:
//
// bool, int, string, any type with the standard String() interface
type ConfigValue interface{}
// ConfigMap is a map containing standard librdkafka configuration properties as documented in:
// https://github.com/confluentinc/librdkafka/tree/master/CONFIGURATION.md
//
// The special property "default.topic.config" (optional) is a ConfigMap
// containing default topic configuration properties.
//
// The use of "default.topic.config" is deprecated,
// topic configuration properties shall be specified in the standard ConfigMap.
// For backwards compatibility, "default.topic.config" (if supplied)
// takes precedence.
type ConfigMap map[string]ConfigValue
// SetKey sets configuration property key to value.
//
// For user convenience a key prefixed with {topic}. will be
// set on the "default.topic.config" sub-map; this use is deprecated.
func (m ConfigMap) SetKey(key string, value ConfigValue) error {
if strings.HasPrefix(key, "{topic}.") {
_, found := m["default.topic.config"]
if !found {
m["default.topic.config"] = ConfigMap{}
}
m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value
} else {
m[key] = value
}
return nil
}
// Set implements flag.Set (command line argument parser) as a convenience
// for `-X key=value` config.
func (m ConfigMap) Set(kv string) error {
i := strings.Index(kv, "=")
if i == -1 {
return newErrorFromString(ErrInvalidArg, "Expected key=value")
}
k := kv[:i]
v := kv[i+1:]
return m.SetKey(k, v)
}
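// Illustrative usage sketch (not part of the original file): building a
// ConfigMap with SetKey and with the "-X key=value" form accepted by Set.
// Broker address is a placeholder; assumes the kafka import.
func buildProducerConfig() (*kafka.ConfigMap, error) {
    conf := kafka.ConfigMap{"bootstrap.servers": "localhost:9092"}
    if err := conf.SetKey("acks", "all"); err != nil {
        return nil, err
    }
    // Same effect as passing -X debug=broker,protocol on a command line.
    if err := conf.Set("debug=broker,protocol"); err != nil {
        return nil, err
    }
    return &conf, nil
}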
func value2string(v ConfigValue) (ret string, errstr string) {
errstr = ""
switch x := v.(type) {
case bool:
if x {
ret = "true"
} else {
ret = "false"
}
case int:
ret = fmt.Sprintf("%d", x)
case string:
ret = x
case types.Slice:
ret = ""
arr := v.([]ConfigValue)
for _, i := range arr {
temp, err := value2string(i)
if err != "" {
ret = ""
errstr = fmt.Sprintf("Invalid value type %T", v)
break
}
ret += temp + ","
}
if len(ret) != 0 {
ret = ret[:len(ret)-1]
}
case fmt.Stringer:
ret = x.String()
default:
ret = ""
errstr = fmt.Sprintf("Invalid value type %T", v)
}
return ret, errstr
}
// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t
// into a common interface.
type rdkAnyconf interface {
set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t
}
func anyconfSet(anyconf rdkAnyconf, key string, val ConfigValue) (err error) {
value, errstr := value2string(val)
if errstr != "" {
return newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, key))
}
cKey := C.CString(key)
defer C.free(unsafe.Pointer(cKey))
cVal := C.CString(value)
defer C.free(unsafe.Pointer(cVal))
cErrstr := (*C.char)(C.malloc(C.size_t(128)))
defer C.free(unsafe.Pointer(cErrstr))
if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK {
return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
}
return nil
}
// we need these typedefs to workaround a crash in golint
// when parsing the set() methods below
type rdkConf C.rd_kafka_conf_t
type rdkTopicConf C.rd_kafka_topic_conf_t
func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}
func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}
func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) {
// set plugins first, any plugin-specific configuration depends on
// the plugin to have already been set
pluginPaths, ok := m["plugin.library.paths"]
if ok {
err = anyconfSet(anyconf, "plugin.library.paths", pluginPaths)
if err != nil {
return err
}
}
for k, v := range m {
if k == "plugin.library.paths" {
continue
}
switch v.(type) {
case ConfigMap:
/* Special sub-ConfigMap, only used for default.topic.config */
if k != "default.topic.config" {
return newErrorFromString(ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k))
}
var cTopicConf = C.rd_kafka_topic_conf_new()
err = configConvertAnyconf(v.(ConfigMap),
(*rdkTopicConf)(cTopicConf))
if err != nil {
C.rd_kafka_topic_conf_destroy(cTopicConf)
return err
}
C.rd_kafka_conf_set_default_topic_conf(
(*C.rd_kafka_conf_t)(anyconf.(*rdkConf)),
(*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf)))
default:
err = anyconfSet(anyconf, k, v)
if err != nil {
return err
}
}
}
return nil
}
// convert ConfigMap to C rd_kafka_conf_t *
func (m ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) {
cConf = C.rd_kafka_conf_new()
// Set the client.software.name and .version (use librdkafka version).
_, librdkafkaVersion := LibraryVersion()
anyconfSet((*rdkConf)(cConf), "client.software.name", "confluent-kafka-go")
anyconfSet((*rdkConf)(cConf), "client.software.version", librdkafkaVersion)
err = configConvertAnyconf(m, (*rdkConf)(cConf))
if err != nil {
C.rd_kafka_conf_destroy(cConf)
return nil, err
}
return cConf, nil
}
// get finds key in the configmap and returns its value.
// If the key is not found defval is returned.
// If the key is found but the type is mismatched an error is returned.
func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) {
if strings.HasPrefix(key, "{topic}.") {
defconfCv, found := m["default.topic.config"]
if !found {
return defval, nil
}
return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval)
}
v, ok := m[key]
if !ok {
return defval, nil
}
if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) {
return nil, newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v))
}
return v, nil
}
// extract performs a get() and if found deletes the key.
func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) {
v, err := m.get(key, defval)
if err != nil {
return nil, err
}
delete(m, key)
return v, nil
}
// extractLogConfig extracts generic go.logs.* configuration properties.
func (m ConfigMap) extractLogConfig() (logsChanEnable bool, logsChan chan LogEvent, err error) {
v, err := m.extract("go.logs.channel.enable", false)
if err != nil {
return
}
logsChanEnable = v.(bool)
v, err = m.extract("go.logs.channel", nil)
if err != nil {
return
}
if v != nil {
logsChan = v.(chan LogEvent)
}
if logsChanEnable {
// Tell librdkafka to forward logs to the log queue
m.Set("log.queue=true")
}
return
}
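// Illustrative usage sketch (not part of the original file): the go.logs.*
// properties extracted above are set by the application to receive librdkafka
// logs on a Go channel instead of stderr. Placeholder broker address; assumes
// "fmt" and the kafka import.
func producerWithLogChannel() (*kafka.Producer, chan kafka.LogEvent, error) {
    logs := make(chan kafka.LogEvent, 100)
    p, err := kafka.NewProducer(&kafka.ConfigMap{
        "bootstrap.servers":      "localhost:9092",
        "go.logs.channel.enable": true,
        "go.logs.channel":        logs,
    })
    if err != nil {
        return nil, nil, err
    }
    go func() {
        for ev := range logs {
            fmt.Println(ev) // each LogEvent carries level, tag and message
        }
    }()
    return p, logs, nil
}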
func (m ConfigMap) clone() ConfigMap {
m2 := make(ConfigMap)
for k, v := range m {
m2[k] = v
}
return m2
}
// Get finds the given key in the ConfigMap and returns its value.
// If the key is not found `defval` is returned.
// If the key is found but the type does not match that of `defval` (unless nil)
// an ErrInvalidArg error is returned.
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) {
return m.get(key, defval)
}
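// Illustrative usage sketch (not part of the original file): Get returns the
// stored value or the supplied default, and rejects a value whose type differs
// from the default's. Assumes the kafka import.
func bootstrapServers(conf kafka.ConfigMap) (string, error) {
    v, err := conf.Get("bootstrap.servers", "")
    if err != nil {
        return "", err // e.g. the key holds a non-string value
    }
    return v.(string), nil
}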

File diff suppressed because it is too large

View File

@@ -1,31 +0,0 @@
/**
* Copyright 2019 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"context"
"time"
)
// timeout returns the remaining time after which work done on behalf of this context should be
// canceled, or ok==false if no deadline/timeout is set.
func timeout(ctx context.Context) (timeout time.Duration, ok bool) {
if deadline, ok := ctx.Deadline(); ok {
return deadline.Sub(time.Now()), true
}
return 0, false
}
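// Illustrative usage sketch (not part of the original file): callers bound a
// call with a context deadline, which the helper above converts into a relative
// duration. Assumes "context", "time" and the kafka import.
func describeWithDeadline(admin *kafka.AdminClient) error {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    _, err := admin.DescribeCluster(ctx)
    return err
}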

View File

@@ -1,169 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go
/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"
import (
"fmt"
"unsafe"
)
// Error provides a Kafka-specific error container
type Error struct {
code ErrorCode
str string
fatal bool
retriable bool
txnRequiresAbort bool
}
func newError(code C.rd_kafka_resp_err_t) (err Error) {
return Error{code: ErrorCode(code)}
}
// NewError creates a new Error.
func NewError(code ErrorCode, str string, fatal bool) (err Error) {
return Error{code: code, str: str, fatal: fatal}
}
func newErrorFromString(code ErrorCode, str string) (err Error) {
return Error{code: code, str: str}
}
func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) {
var str string
if cstr != nil {
str = C.GoString(cstr)
} else {
str = ""
}
return Error{code: ErrorCode(code), str: str}
}
func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) {
return newErrorFromString(ErrorCode(code), str)
}
// newErrorFromCError creates a new Error instance
func newErrorFromCError(cError *C.rd_kafka_error_t) Error {
return Error{
code: ErrorCode(C.rd_kafka_error_code(cError)),
str: C.GoString(C.rd_kafka_error_string(cError)),
fatal: cint2bool(C.rd_kafka_error_is_fatal(cError)),
retriable: cint2bool(C.rd_kafka_error_is_retriable(cError)),
txnRequiresAbort: cint2bool(C.rd_kafka_error_txn_requires_abort(cError)),
}
}
// newErrorFromCErrorDestroy creates a new Error instance and destroys
// the passed cError.
func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
defer C.rd_kafka_error_destroy(cError)
return newErrorFromCError(cError)
}
// Error returns a human readable representation of an Error
// Same as Error.String()
func (e Error) Error() string {
return e.String()
}
// String returns a human readable representation of an Error
func (e Error) String() string {
var errstr string
if len(e.str) > 0 {
errstr = e.str
} else {
errstr = e.code.String()
}
if e.IsFatal() {
return fmt.Sprintf("Fatal error: %s", errstr)
}
return errstr
}
// Code returns the ErrorCode of an Error
func (e Error) Code() ErrorCode {
return e.code
}
// IsFatal returns true if the error is a fatal error.
// A fatal error indicates the client instance is no longer operable and
// should be terminated. Typical causes include non-recoverable
// idempotent producer errors.
func (e Error) IsFatal() bool {
return e.fatal
}
// IsRetriable returns true if the operation that caused this error
// may be retried.
// This flag is currently only set by the Transactional producer API.
func (e Error) IsRetriable() bool {
return e.retriable
}
// IsTimeout returns true if the error is a timeout error.
// A timeout error indicates that the operation timed out locally.
func (e Error) IsTimeout() bool {
return e.code == ErrTimedOut || e.code == ErrTimedOutQueue
}
// TxnRequiresAbort returns true if the error is an abortable transaction error
// that requires the application to abort the current transaction with
// AbortTransaction() and start a new transaction with BeginTransaction()
// if it wishes to proceed with transactional operations.
// This flag is only set by the Transactional producer API.
func (e Error) TxnRequiresAbort() bool {
return e.txnRequiresAbort
}
// getFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func getFatalError(H Handle) error {
cErrstr := (*C.char)(C.malloc(C.size_t(512)))
defer C.free(unsafe.Pointer(cErrstr))
cErr := C.rd_kafka_fatal_error(H.gethandle().rk, cErrstr, 512)
if int(cErr) == 0 {
return nil
}
err := newErrorFromCString(cErr, cErrstr)
err.fatal = true
return err
}
// testFatalError triggers a fatal error in the underlying client.
// This is to be used strictly for testing purposes.
func testFatalError(H Handle, code ErrorCode, str string) ErrorCode {
return ErrorCode(C.rd_kafka_test_fatal_error(H.gethandle().rk, C.rd_kafka_resp_err_t(code), C.CString(str)))
}
func getOperationNotAllowedErrorForClosedClient() error {
return newErrorFromString(ErrState, "Operation not allowed on closed client")
}
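// Illustrative usage sketch (not part of the original file): inspecting a
// returned error as a kafka.Error to decide between retrying, giving up, or
// recreating the client. Assumes "errors" and the kafka import.
func classify(err error) string {
    var kerr kafka.Error
    if !errors.As(err, &kerr) {
        return "not a kafka error"
    }
    switch {
    case kerr.IsFatal():
        return "fatal: recreate the client" // e.g. idempotent-producer violation
    case kerr.IsRetriable(), kerr.IsTimeout():
        return "transient: retry"
    default:
        return "permanent: " + kerr.Code().String()
    }
}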

View File

@@ -1,112 +0,0 @@
/**
* Copyright 2020 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go
/*
#include <stdlib.h>
#include "select_rdkafka.h"
static const char *errdesc_to_string (const struct rd_kafka_err_desc *ed, int idx) {
return ed[idx].name;
}
static const char *errdesc_to_desc (const struct rd_kafka_err_desc *ed, int idx) {
return ed[idx].desc;
}
*/
import "C"
import (
"fmt"
"os"
"strings"
"time"
)
// camelCase transforms a snake_case string to camelCase.
func camelCase(s string) string {
ret := ""
for _, v := range strings.Split(s, "_") {
if len(v) == 0 {
continue
}
ret += strings.ToUpper((string)(v[0])) + strings.ToLower(v[1:])
}
return ret
}
// WriteErrorCodes writes Go error code constants to file from the
// librdkafka error codes.
// This function is not intended for public use.
func WriteErrorCodes(f *os.File) {
f.WriteString("package kafka\n\n")
now := time.Now()
f.WriteString(fmt.Sprintf("// Copyright 2016-%d Confluent Inc.\n", now.Year()))
f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED ON %v USING librdkafka %s\n",
now, C.GoString(C.rd_kafka_version_str())))
var errdescs *C.struct_rd_kafka_err_desc
var csize C.size_t
C.rd_kafka_get_err_descs(&errdescs, &csize)
f.WriteString(`
/*
#include "select_rdkafka.h"
*/
import "C"
// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int
// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}
const (
`)
for i := 0; i < int(csize); i++ {
orig := C.GoString(C.errdesc_to_string(errdescs, C.int(i)))
if len(orig) == 0 {
continue
}
desc := C.GoString(C.errdesc_to_desc(errdescs, C.int(i)))
if len(desc) == 0 {
continue
}
errname := "Err" + camelCase(orig)
// Special handling to please golint
// Eof -> EOF
// Id -> ID
errname = strings.Replace(errname, "Eof", "EOF", -1)
errname = strings.Replace(errname, "Id", "ID", -1)
f.WriteString(fmt.Sprintf("\t// %s %s\n", errname, desc))
f.WriteString(fmt.Sprintf("\t%s ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n",
errname, orig))
}
f.WriteString(")\n")
}

View File

@@ -1,316 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
"os"
"unsafe"
)
/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
void chdrs_to_tmphdrs (glue_msg_t *gMsg) {
size_t i = 0;
const char *name;
const void *val;
size_t size;
rd_kafka_headers_t *chdrs;
if (rd_kafka_message_headers(gMsg->msg, &chdrs)) {
gMsg->tmphdrs = NULL;
gMsg->tmphdrsCnt = 0;
return;
}
gMsg->tmphdrsCnt = rd_kafka_header_cnt(chdrs);
gMsg->tmphdrs = malloc(sizeof(*gMsg->tmphdrs) * gMsg->tmphdrsCnt);
while (!rd_kafka_header_get_all(chdrs, i,
&gMsg->tmphdrs[i].key,
&gMsg->tmphdrs[i].val,
(size_t *)&gMsg->tmphdrs[i].size))
i++;
}
rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs,
rd_kafka_event_type_t *evtype,
glue_msg_t *gMsg,
rd_kafka_event_t *prev_rkev) {
rd_kafka_event_t *rkev;
if (prev_rkev)
rd_kafka_event_destroy(prev_rkev);
rkev = rd_kafka_queue_poll(rkq, timeoutMs);
*evtype = rd_kafka_event_type(rkev);
if (*evtype == RD_KAFKA_EVENT_FETCH) {
gMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev);
gMsg->ts = rd_kafka_message_timestamp(gMsg->msg, &gMsg->tstype);
if (gMsg->want_hdrs)
chdrs_to_tmphdrs(gMsg);
}
return rkev;
}
*/
import "C"
func chdrsToTmphdrs(gMsg *C.glue_msg_t) {
C.chdrs_to_tmphdrs(gMsg)
}
// Event generic interface
type Event interface {
// String returns a human-readable representation of the event
String() string
}
// Specific event types
// Stats statistics event
type Stats struct {
statsJSON string
}
func (e Stats) String() string {
return e.statsJSON
}
// AssignedPartitions consumer group rebalance event: assigned partition set
type AssignedPartitions struct {
Partitions []TopicPartition
}
func (e AssignedPartitions) String() string {
return fmt.Sprintf("AssignedPartitions: %v", e.Partitions)
}
// RevokedPartitions consumer group rebalance event: revoked partition set
type RevokedPartitions struct {
Partitions []TopicPartition
}
func (e RevokedPartitions) String() string {
return fmt.Sprintf("RevokedPartitions: %v", e.Partitions)
}
// PartitionEOF consumer reached end of partition
// Needs to be explicitly enabled by setting the `enable.partition.eof`
// configuration property to true.
type PartitionEOF TopicPartition
func (p PartitionEOF) String() string {
return fmt.Sprintf("EOF at %s", TopicPartition(p))
}
// OffsetsCommitted reports committed offsets
type OffsetsCommitted struct {
Error error
Offsets []TopicPartition
}
func (o OffsetsCommitted) String() string {
return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets)
}
// OAuthBearerTokenRefresh indicates token refresh is required
type OAuthBearerTokenRefresh struct {
// Config is the value of the sasl.oauthbearer.config property
Config string
}
func (o OAuthBearerTokenRefresh) String() string {
return "OAuthBearerTokenRefresh"
}
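// Illustrative usage sketch (not part of the original file): the event types
// above are what a poll loop switches on. AssignedPartitions/RevokedPartitions
// are only delivered when go.application.rebalance.enable is true, and
// PartitionEOF only when enable.partition.eof is true. Assumes "fmt" and the
// kafka import.
func pollLoop(c *kafka.Consumer) {
    for {
        switch e := c.Poll(100).(type) {
        case *kafka.Message:
            fmt.Printf("%s: %s\n", e.TopicPartition, e.Value)
        case kafka.AssignedPartitions:
            c.Assign(e.Partitions)
        case kafka.RevokedPartitions:
            c.Unassign()
        case kafka.PartitionEOF:
            fmt.Println("reached end of", kafka.TopicPartition(e))
        case kafka.OffsetsCommitted:
            fmt.Println(e)
        case kafka.Error:
            fmt.Println("error:", e)
        case nil:
            // poll timed out, nothing to do
        }
    }
}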
// eventPoll polls an event from the handler's C rd_kafka_queue_t,
// translates it into an Event type and then sends on `channel` if non-nil, else returns the Event.
// term_chan is an optional channel to monitor along with producing to channel
// to indicate that `channel` is being terminated.
// returns (event Event, terminate Bool) tuple, where Terminate indicates
// if termChan received a termination event.
func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) {
var prevRkev *C.rd_kafka_event_t
term := false
var retval Event
if channel == nil {
maxEvents = 1
}
out:
for evcnt := 0; evcnt < maxEvents; evcnt++ {
var evtype C.rd_kafka_event_type_t
var gMsg C.glue_msg_t
gMsg.want_hdrs = C.int8_t(bool2cint(h.msgFields.Headers))
rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &gMsg, prevRkev)
prevRkev = rkev
timeoutMs = 0
retval = nil
switch evtype {
case C.RD_KAFKA_EVENT_FETCH:
// Consumer fetch event, new message.
// Extracted into temporary gMsg for optimization
retval = h.newMessageFromGlueMsg(&gMsg)
case C.RD_KAFKA_EVENT_REBALANCE:
// Consumer rebalance event
retval = h.c.handleRebalanceEvent(channel, rkev)
case C.RD_KAFKA_EVENT_ERROR:
// Error event
cErr := C.rd_kafka_event_error(rkev)
if cErr == C.RD_KAFKA_RESP_ERR__PARTITION_EOF {
crktpar := C.rd_kafka_event_topic_partition(rkev)
if crktpar == nil {
break
}
defer C.rd_kafka_topic_partition_destroy(crktpar)
var peof PartitionEOF
setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar)
retval = peof
} else if int(C.rd_kafka_event_error_is_fatal(rkev)) != 0 {
// A fatal error has been raised.
// Extract the actual error from the client
// instance and return a new Error with
// fatal set to true.
cFatalErrstrSize := C.size_t(512)
cFatalErrstr := (*C.char)(C.malloc(cFatalErrstrSize))
defer C.free(unsafe.Pointer(cFatalErrstr))
cFatalErr := C.rd_kafka_fatal_error(h.rk, cFatalErrstr, cFatalErrstrSize)
fatalErr := newErrorFromCString(cFatalErr, cFatalErrstr)
fatalErr.fatal = true
retval = fatalErr
} else {
retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
}
case C.RD_KAFKA_EVENT_STATS:
retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))}
case C.RD_KAFKA_EVENT_DR:
// Producer Delivery Report event
// Each such event contains delivery reports for all
// messages in the produced batch.
// Forward delivery reports to per-message's response channel
// or to the global Producer.Events channel, or none.
rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev)))
cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages))))
for _, rkmessage := range rkmessages[:cnt] {
msg := h.newMessageFromC(rkmessage)
var ch *chan Event
if rkmessage._private != nil {
// Find cgoif by id
cg, found := h.cgoGet((int)((uintptr)(rkmessage._private)))
if found {
cdr := cg.(cgoDr)
if cdr.deliveryChan != nil {
ch = &cdr.deliveryChan
}
msg.Opaque = cdr.opaque
}
}
if ch == nil && h.fwdDr {
ch = &channel
}
if ch != nil {
select {
case *ch <- msg:
case <-termChan:
retval = nil
term = true
break out
}
} else {
retval = msg
break out
}
}
case C.RD_KAFKA_EVENT_OFFSET_COMMIT:
// Offsets committed
cErr := C.rd_kafka_event_error(rkev)
coffsets := C.rd_kafka_event_topic_partition_list(rkev)
var offsets []TopicPartition
if coffsets != nil {
offsets = newTopicPartitionsFromCparts(coffsets)
}
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets}
} else {
retval = OffsetsCommitted{nil, offsets}
}
case C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
ev := OAuthBearerTokenRefresh{C.GoString(C.rd_kafka_event_config_string(rkev))}
retval = ev
case C.RD_KAFKA_EVENT_NONE:
// poll timed out: no events available
break out
default:
if rkev != nil {
fmt.Fprintf(os.Stderr, "Ignored event %s\n",
C.GoString(C.rd_kafka_event_name(rkev)))
}
}
if retval != nil {
if channel != nil {
select {
case channel <- retval:
case <-termChan:
retval = nil
term = true
break out
}
} else {
break out
}
}
}
if prevRkev != nil {
C.rd_kafka_event_destroy(prevRkev)
}
return retval, term
}
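// Illustrative usage sketch (not part of the original file): the delivery-report
// branch above routes each report either to the per-message channel given to
// Produce or to the global Events channel. The per-message form looks like this;
// assumes the kafka import.
func produceAndWait(p *kafka.Producer, topic string, value []byte) error {
    deliveryChan := make(chan kafka.Event, 1)
    err := p.Produce(&kafka.Message{
        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
        Value:          value,
    }, deliveryChan)
    if err != nil {
        return err
    }
    m := (<-deliveryChan).(*kafka.Message)
    return m.TopicPartition.Error // nil on successful delivery
}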

View File

@@ -1,340 +0,0 @@
package kafka
// Copyright 2016-2023 Confluent Inc.
// AUTOMATICALLY GENERATED ON 2023-10-25 15:32:05.267754826 +0200 CEST m=+0.000622161 USING librdkafka 2.3.0
/*
#include "select_rdkafka.h"
*/
import "C"
// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int
// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}
const (
// ErrBadMsg Local: Bad message format
ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
// ErrBadCompression Local: Invalid compressed data
ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
// ErrDestroy Local: Broker handle destroyed
ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
// ErrFail Local: Communication failure with broker
ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
// ErrTransport Local: Broker transport failure
ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
// ErrCritSysResource Local: Critical system resource failure
ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
// ErrResolve Local: Host resolution failure
ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
// ErrMsgTimedOut Local: Message timed out
ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
// ErrPartitionEOF Broker: No more messages
ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
// ErrUnknownPartition Local: Unknown partition
ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
// ErrFs Local: File or filesystem error
ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
// ErrUnknownTopic Local: Unknown topic
ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
// ErrAllBrokersDown Local: All broker connections are down
ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
// ErrInvalidArg Local: Invalid argument or configuration
ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
// ErrTimedOut Local: Timed out
ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
// ErrQueueFull Local: Queue full
ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
// ErrIsrInsuff Local: ISR count insufficient
ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
// ErrNodeUpdate Local: Broker node update
ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
// ErrSsl Local: SSL error
ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
// ErrWaitCoord Local: Waiting for coordinator
ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
// ErrUnknownGroup Local: Unknown group
ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
// ErrInProgress Local: Operation in progress
ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
// ErrPrevInProgress Local: Previous operation in progress
ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
// ErrExistingSubscription Local: Existing subscription
ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
// ErrAssignPartitions Local: Assign partitions
ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
// ErrRevokePartitions Local: Revoke partitions
ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
// ErrConflict Local: Conflicting use
ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
// ErrState Local: Erroneous state
ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
// ErrUnknownProtocol Local: Unknown protocol
ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
// ErrNotImplemented Local: Not implemented
ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
// ErrAuthentication Local: Authentication failure
ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
// ErrNoOffset Local: No offset stored
ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
// ErrOutdated Local: Outdated
ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
// ErrTimedOutQueue Local: Timed out in queue
ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
// ErrUnsupportedFeature Local: Required feature not supported by broker
ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
// ErrWaitCache Local: Awaiting cache update
ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
// ErrIntr Local: Operation interrupted
ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
// ErrKeySerialization Local: Key serialization error
ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
// ErrValueSerialization Local: Value serialization error
ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
// ErrKeyDeserialization Local: Key deserialization error
ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
// ErrValueDeserialization Local: Value deserialization error
ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
// ErrPartial Local: Partial response
ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
// ErrReadOnly Local: Read-only object
ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
// ErrNoent Local: No such entry
ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
// ErrUnderflow Local: Read underflow
ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
// ErrInvalidType Local: Invalid type
ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
// ErrRetry Local: Retry operation
ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
// ErrPurgeQueue Local: Purged in queue
ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
// ErrPurgeInflight Local: Purged in flight
ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
// ErrFatal Local: Fatal error
ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
// ErrInconsistent Local: Inconsistent state
ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
// ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
// ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
// ErrUnknownBroker Local: Unknown broker
ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
// ErrNotConfigured Local: Functionality not configured
ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
// ErrFenced Local: This instance has been fenced by a newer instance
ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
// ErrApplication Local: Application generated error
ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
// ErrAssignmentLost Local: Group partition assignment lost
ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
// ErrNoop Local: No operation performed
ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
// ErrAutoOffsetReset Local: No offset to automatically reset to
ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
// ErrLogTruncation Local: Partition log truncation detected
ErrLogTruncation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__LOG_TRUNCATION)
// ErrUnknown Unknown broker error
ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
// ErrNoError Success
ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
// ErrOffsetOutOfRange Broker: Offset out of range
ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
// ErrInvalidMsg Broker: Invalid message
ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
// ErrUnknownTopicOrPart Broker: Unknown topic or partition
ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
// ErrInvalidMsgSize Broker: Invalid message size
ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
// ErrLeaderNotAvailable Broker: Leader not available
ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
// ErrNotLeaderForPartition Broker: Not leader for partition
ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
// ErrRequestTimedOut Broker: Request timed out
ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
// ErrBrokerNotAvailable Broker: Broker not available
ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
// ErrReplicaNotAvailable Broker: Replica not available
ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
// ErrMsgSizeTooLarge Broker: Message size too large
ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
// ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
// ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
// ErrNetworkException Broker: Broker disconnected before response received
ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
// ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
// ErrCoordinatorNotAvailable Broker: Coordinator not available
ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
// ErrNotCoordinator Broker: Not coordinator
ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
// ErrTopicException Broker: Invalid topic
ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
// ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
// ErrNotEnoughReplicas Broker: Not enough in-sync replicas
ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
// ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
// ErrInvalidRequiredAcks Broker: Invalid required acks value
ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
// ErrIllegalGeneration Broker: Specified group generation id is not valid
ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
// ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
// ErrInvalidGroupID Broker: Invalid group.id
ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
// ErrUnknownMemberID Broker: Unknown member
ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
// ErrInvalidSessionTimeout Broker: Invalid session timeout
ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
// ErrRebalanceInProgress Broker: Group rebalance in progress
ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
// ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
// ErrTopicAuthorizationFailed Broker: Topic authorization failed
ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
// ErrGroupAuthorizationFailed Broker: Group authorization failed
ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
// ErrClusterAuthorizationFailed Broker: Cluster authorization failed
ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
// ErrInvalidTimestamp Broker: Invalid timestamp
ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
// ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
// ErrIllegalSaslState Broker: Request not valid in current SASL state
ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
// ErrUnsupportedVersion Broker: API version not supported
ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
// ErrTopicAlreadyExists Broker: Topic already exists
ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
// ErrInvalidPartitions Broker: Invalid number of partitions
ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
// ErrInvalidReplicationFactor Broker: Invalid replication factor
ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
// ErrInvalidReplicaAssignment Broker: Invalid replica assignment
ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
// ErrInvalidConfig Broker: Configuration is invalid
ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
// ErrNotController Broker: Not controller for cluster
ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
// ErrInvalidRequest Broker: Invalid request
ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
// ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
// ErrPolicyViolation Broker: Policy violation
ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
// ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
// ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
// ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
// ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
// ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
// ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
// ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
// ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
// ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
// ErrSecurityDisabled Broker: Security features are disabled
ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
// ErrOperationNotAttempted Broker: Operation not attempted
ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
// ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
// ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
// ErrSaslAuthenticationFailed Broker: SASL Authentication failed
ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
// ErrUnknownProducerID Broker: Unknown Producer Id
ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
// ErrReassignmentInProgress Broker: Partition reassignment is in progress
ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
// ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
// ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
// ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
// ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
// ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
// ErrDelegationTokenExpired Broker: Delegation Token is expired
ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
// ErrInvalidPrincipalType Broker: Supplied principalType is not supported
ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
// ErrNonEmptyGroup Broker: The group is not empty
ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
// ErrGroupIDNotFound Broker: The group id does not exist
ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
// ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
// ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
// ErrListenerNotFound Broker: No matching listener
ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
// ErrTopicDeletionDisabled Broker: Topic deletion is disabled
ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
// ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
// ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
// ErrUnsupportedCompressionType Broker: Unsupported compression type
ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
// ErrStaleBrokerEpoch Broker: Broker epoch has changed
ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
// ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
// ErrMemberIDRequired Broker: Group member needs a valid member ID
ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
// ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
// ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
// ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
// ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
// ErrElectionNotNeeded Broker: Leader election not needed for topic partition
ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
// ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
// ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
// ErrInvalidRecord Broker: Broker failed to validate record
ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
// ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
// ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
// ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
// ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
// ErrDuplicateResource Broker: Request illegally referred to the same resource twice
ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
// ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
// ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters
ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
// ErrInvalidUpdateVersion Broker: Invalid update version
ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
// ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
// ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
)

View File

@@ -1,48 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* Glue between Go, Cgo and librdkafka
*/
/**
* Temporary C to Go header representation
*/
typedef struct tmphdr_s {
const char *key;
const void *val; // producer: malloc()ed by Go code if size > 0
// consumer: owned by librdkafka
ssize_t size;
} tmphdr_t;
/**
* @struct This is a glue struct used by the C code in this client to
* effectively map fields from a librdkafka rd_kafka_message_t
* to something usable in Go with as few CGo calls as possible.
*/
typedef struct glue_msg_s {
rd_kafka_message_t *msg;
rd_kafka_timestamp_type_t tstype;
int64_t ts;
tmphdr_t *tmphdrs;
size_t tmphdrsCnt;
int8_t want_hdrs; /**< If true, copy headers */
} glue_msg_t;

View File

@@ -1,385 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
"strings"
"sync"
"time"
"unsafe"
)
/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"
// OAuthBearerToken represents the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication.
type OAuthBearerToken struct {
// Token value, often (but not necessarily) a JWS compact serialization
// as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
// the regular expression for a SASL/OAUTHBEARER value defined at
// https://tools.ietf.org/html/rfc7628#section-3.1
TokenValue string
// Metadata about the token indicating when it expires (local time);
// it must represent a time in the future
Expiration time.Time
// Metadata about the token indicating the Kafka principal name
// to which it applies (for example, "admin")
Principal string
// SASL extensions, if any, to be communicated to the broker during
// authentication (all keys and values of which must meet the regular
// expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
// and it must not contain the reserved "auth" key)
Extensions map[string]string
}
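// An illustrative sketch (not part of the original file): constructing a token
// with hypothetical values and handing it to a client; `client` stands for any
// Producer or Consumer value implementing Handle.
//
//	token := OAuthBearerToken{
//	    TokenValue: "eyJhbGciOiJub25lIn0.eyJzdWIiOiJhZG1pbiJ9.", // must satisfy the RFC 7628 value syntax
//	    Expiration: time.Now().Add(time.Hour),                   // must lie in the future
//	    Principal:  "admin",
//	    Extensions: map[string]string{"traceId": "abc123"},      // must not use the reserved "auth" key
//	}
//	if err := client.SetOAuthBearerToken(token); err != nil {
//	    _ = client.SetOAuthBearerTokenFailure(err.Error())
//	}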
// Handle represents a generic client handle containing common parts for
// both Producer and Consumer.
type Handle interface {
// SetOAuthBearerToken sets the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication. It will return nil
// on success, otherwise an error if:
// 1) the token data is invalid (meaning an expiration time in the past
// or either a token value or an extension key or value that does not meet
// the regular expression requirements as per
// https://tools.ietf.org/html/rfc7628#section-3.1);
// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
// SetOAuthBearerTokenFailure sets the error message describing why token
// retrieval/setting failed; it also schedules a new token refresh event for 10
// seconds later so the attempt may be retried. It will return nil on
// success, otherwise an error if:
// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
SetOAuthBearerTokenFailure(errstr string) error
// gethandle() returns the internal handle struct pointer
gethandle() *handle
// verifyClient() returns the validity of client
verifyClient() error
// IsClosed() returns the bool to check if the client is closed
IsClosed() bool
}
// Common instance handle for both Producer and Consumer
type handle struct {
rk *C.rd_kafka_t
rkq *C.rd_kafka_queue_t
// Forward logs from librdkafka log queue to logs channel.
logs chan LogEvent
logq *C.rd_kafka_queue_t
closeLogsChan bool
// Topic <-> rkt caches
rktCacheLock sync.Mutex
// topic name -> rkt cache
rktCache map[string]*C.rd_kafka_topic_t
// rkt -> topic name cache
rktNameCache map[*C.rd_kafka_topic_t]string
// Cached instance name to avoid CGo call in String()
name string
//
// cgo map
// Maps C callbacks based on cgoid back to its Go object
cgoLock sync.Mutex
cgoidNext uintptr
cgomap map[int]cgoif
//
// producer
//
p *Producer
// Forward delivery reports on Producer.Events channel
fwdDr bool
// Enabled message fields for delivery reports and consumed messages.
msgFields *messageFields
//
// consumer
//
c *Consumer
// WaitGroup to wait for spawned go-routines to finish.
waitGroup sync.WaitGroup
}
func (h *handle) String() string {
return h.name
}
func (h *handle) setup() {
h.rktCache = make(map[string]*C.rd_kafka_topic_t)
h.rktNameCache = make(map[*C.rd_kafka_topic_t]string)
h.cgomap = make(map[int]cgoif)
h.name = C.GoString(C.rd_kafka_name(h.rk))
if h.msgFields == nil {
h.msgFields = newMessageFields()
}
}
func (h *handle) cleanup() {
if h.logs != nil {
C.rd_kafka_queue_destroy(h.logq)
if h.closeLogsChan {
close(h.logs)
}
}
for _, crkt := range h.rktCache {
C.rd_kafka_topic_destroy(crkt)
}
if h.rkq != nil {
C.rd_kafka_queue_destroy(h.rkq)
}
}
func (h *handle) setupLogQueue(logsChan chan LogEvent, termChan chan bool) {
if logsChan == nil {
logsChan = make(chan LogEvent, 10000)
h.closeLogsChan = true
}
h.logs = logsChan
// Let librdkafka forward logs to our log queue instead of the main queue
h.logq = C.rd_kafka_queue_new(h.rk)
C.rd_kafka_set_log_queue(h.rk, h.logq)
// Start a polling goroutine to consume the log queue
h.waitGroup.Add(1)
go func() {
h.pollLogEvents(h.logs, 100, termChan)
h.waitGroup.Done()
}()
}
// getRkt0 finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) {
if doLock {
h.rktCacheLock.Lock()
defer h.rktCacheLock.Unlock()
}
crkt, ok := h.rktCache[topic]
if ok {
return crkt
}
if ctopic == nil {
ctopic = C.CString(topic)
defer C.free(unsafe.Pointer(ctopic))
}
crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil)
if crkt == nil {
panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s",
topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error()))))
}
h.rktCache[topic] = crkt
h.rktNameCache[crkt] = topic
return crkt
}
// getRkt finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) {
return h.getRkt0(topic, nil, true)
}
// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably
// using the local cache to avoid a cgo call.
func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) {
h.rktCacheLock.Lock()
defer h.rktCacheLock.Unlock()
topic, ok := h.rktNameCache[crkt]
if ok {
return topic
}
// we need our own copy/refcount of the crkt
ctopic := C.rd_kafka_topic_name(crkt)
topic = C.GoString(ctopic)
crkt = h.getRkt0(topic, ctopic, false /* don't lock */)
return topic
}
// cgoif is a generic interface for holding Go state passed as opaque
// value to the C code.
// Since pointers to complex Go types cannot be passed to C we instead create
// a cgoif object, generate a unique id that is added to the cgomap,
// and then pass that id to the C code. When the C code callback is called we
// use the id to look up the cgoif object in the cgomap.
type cgoif interface{}
// delivery report cgoif container
type cgoDr struct {
deliveryChan chan Event
opaque interface{}
}
// cgoPut adds object cg to the handle's cgo map and returns a
// unique id for the added entry.
// Thread-safe.
// FIXME: the uniqueness of the id is questionable over time.
func (h *handle) cgoPut(cg cgoif) (cgoid int) {
h.cgoLock.Lock()
defer h.cgoLock.Unlock()
h.cgoidNext++
if h.cgoidNext == 0 {
h.cgoidNext++
}
cgoid = (int)(h.cgoidNext)
h.cgomap[cgoid] = cg
return cgoid
}
// cgoGet looks up cgoid in the cgo map, deletes the reference from the map
// and returns the object, if found. Else returns nil, false.
// Thread-safe.
func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) {
if cgoid == 0 {
return nil, false
}
h.cgoLock.Lock()
defer h.cgoLock.Unlock()
cg, found = h.cgomap[cgoid]
if found {
delete(h.cgomap, cgoid)
}
return cg, found
}
// setOAuthBearerToken - see rd_kafka_oauthbearer_set_token()
func (h *handle) setOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
cTokenValue := C.CString(oauthBearerToken.TokenValue)
defer C.free(unsafe.Pointer(cTokenValue))
cPrincipal := C.CString(oauthBearerToken.Principal)
defer C.free(unsafe.Pointer(cPrincipal))
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cExtensions := make([]*C.char, 2*len(oauthBearerToken.Extensions))
extensionSize := 0
for key, value := range oauthBearerToken.Extensions {
cExtensions[extensionSize] = C.CString(key)
defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
extensionSize++
cExtensions[extensionSize] = C.CString(value)
defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
extensionSize++
}
var cExtensionsToUse **C.char
if extensionSize > 0 {
cExtensionsToUse = (**C.char)(unsafe.Pointer(&cExtensions[0]))
}
cErr := C.rd_kafka_oauthbearer_set_token(h.rk, cTokenValue,
C.int64_t(oauthBearerToken.Expiration.UnixNano()/(1000*1000)), cPrincipal,
cExtensionsToUse, C.size_t(extensionSize), cErrstr, cErrstrSize)
if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
return nil
}
return newErrorFromCString(cErr, cErrstr)
}
// setOAuthBearerTokenFailure - see rd_kafka_oauthbearer_set_token_failure()
func (h *handle) setOAuthBearerTokenFailure(errstr string) error {
cerrstr := C.CString(errstr)
defer C.free(unsafe.Pointer(cerrstr))
cErr := C.rd_kafka_oauthbearer_set_token_failure(h.rk, cerrstr)
if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
return nil
}
return newError(cErr)
}
// messageFields controls which fields are made available for producer delivery reports & consumed messages.
// true values indicate that the field should be included
type messageFields struct {
Key bool
Value bool
Headers bool
}
// disableAll disables all fields
func (mf *messageFields) disableAll() {
mf.Key = false
mf.Value = false
mf.Headers = false
}
// newMessageFields returns a new messageFields with all fields enabled
func newMessageFields() *messageFields {
return &messageFields{
Key: true,
Value: true,
Headers: true,
}
}
// newMessageFieldsFrom constructs a new messageFields from the given configuration value
func newMessageFieldsFrom(v ConfigValue) (*messageFields, error) {
msgFields := newMessageFields()
switch v {
case "all":
// nothing to do
case "", "none":
msgFields.disableAll()
default:
msgFields.disableAll()
for _, value := range strings.Split(v.(string), ",") {
switch value {
case "key":
msgFields.Key = true
case "value":
msgFields.Value = true
case "headers":
msgFields.Headers = true
default:
return nil, fmt.Errorf("unknown message field: %s", value)
}
}
}
return msgFields, nil
}
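// An illustrative sketch (not part of the original file): parsing a
// comma-separated field list; only the named fields end up enabled.
//
//	mf, err := newMessageFieldsFrom(ConfigValue("key,headers"))
//	// err == nil; mf.Key and mf.Headers are true, mf.Value is false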

View File

@@ -1,67 +0,0 @@
/**
* Copyright 2018 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
"strconv"
)
/*
#include <string.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"
// Header represents a single Kafka message header.
//
// Message headers are made up of a list of Header elements, retaining their original insert
// order and allowing for duplicate Keys.
//
// Key is a human readable string identifying the header.
// Value is the key's binary value; Kafka does not put any restrictions on the
// format of the Value, but it should be made relatively compact.
// The value may be a byte array, empty, or nil.
//
// NOTE: Message headers are not available on producer delivery report messages.
type Header struct {
Key string // Header name (utf-8 string)
Value []byte // Header value (nil, empty, or binary)
}
// String returns the Header Key and data in a human-readable, possibly truncated form
// suitable for displaying to the user.
func (h Header) String() string {
if h.Value == nil {
return fmt.Sprintf("%s=nil", h.Key)
}
valueLen := len(h.Value)
if valueLen == 0 {
return fmt.Sprintf("%s=<empty>", h.Key)
}
truncSize := valueLen
trunc := ""
if valueLen > 50+15 {
truncSize = 50
trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize)
}
return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc)
}
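// An illustrative sketch (not part of the original file) of the String output:
//
//	h := Header{Key: "trace-id", Value: []byte("abc")}
//	fmt.Println(h) // prints: trace-id="abc"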

View File

@@ -1,483 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package kafka provides high-level Apache Kafka producer and consumers
// using bindings on-top of the librdkafka C library.
//
// # High-level Consumer
//
// * Decide if you want to read messages and events by calling `.Poll()` or
// the deprecated option of using the `.Events()` channel. (If you want to use
// `.Events()` channel then set `"go.events.channel.enable": true`).
//
// * Create a Consumer with `kafka.NewConsumer()` providing at
// least the `bootstrap.servers` and `group.id` configuration properties.
//
// * Call `.Subscribe()` or `.SubscribeTopics()` (to subscribe to multiple topics)
// to join the group with the specified subscription set.
// Subscriptions are atomic, calling `.Subscribe*()` again will leave
// the group and rejoin with the new set of topics.
//
// * Start reading events and messages from either the `.Events` channel
// or by calling `.Poll()`.
//
// * When the group has rebalanced each client member is assigned a
// (sub-)set of topic+partitions.
// By default the consumer will start fetching messages for its assigned
// partitions at this point, but your application may enable rebalance
// events to get an insight into what the assigned partitions were
// as well as set the initial offsets. To do this you need to pass
// `"go.application.rebalance.enable": true` to the `NewConsumer()` call
// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
// with the assigned partition set. You can optionally modify the initial
// offsets (they'll default to stored offsets and if there are no previously stored
// offsets it will fall back to `"auto.offset.reset"`
// which defaults to the `latest` message) and then call `.Assign(partitions)`
// to start consuming. If you don't need to modify the initial offsets you will
// not need to call `.Assign()`; the client will do so automatically for you if
// you don't, unless you are using the channel-based consumer, in which case
// you MUST call `.Assign()` when receiving the `AssignedPartitions` and
// `RevokedPartitions` events.
//
// * As messages are fetched they will be made available on either the
// `.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`.
//
// * Handle messages, events and errors to your liking.
//
// * When you are done consuming call `.Close()` to commit final offsets
// and leave the consumer group.
//
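// A minimal consumer sketch (illustrative only; broker address and topic are
// hypothetical, and error handling is largely elided):
//
//	c, err := kafka.NewConsumer(&kafka.ConfigMap{
//	    "bootstrap.servers": "localhost:9092",
//	    "group.id":          "example-group",
//	})
//	if err != nil {
//	    panic(err)
//	}
//	defer c.Close()
//	c.SubscribeTopics([]string{"example-topic"}, nil)
//	for {
//	    ev := c.Poll(100)
//	    if msg, ok := ev.(*kafka.Message); ok {
//	        fmt.Printf("%s: %s\n", msg.TopicPartition, msg.Value)
//	    }
//	}
//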
// # Producer
//
// * Create a Producer with `kafka.NewProducer()` providing at least
// the `bootstrap.servers` configuration properties.
//
// * Messages may now be produced either by sending a `*kafka.Message`
// on the `.ProduceChannel` or by calling `.Produce()`.
//
// * Producing is an asynchronous operation so the client notifies the application
// of per-message produce success or failure through something called delivery reports.
// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
// was successfully delivered or not.
// It is also possible to direct delivery reports to alternate channels
// by providing a non-nil `chan Event` channel to `.Produce()`.
// If no delivery reports are wanted they can be completely disabled by
// setting configuration property `"go.delivery.reports": false`.
//
// * When you are done producing messages you will need to make sure all messages
// have indeed been delivered to the broker (or have failed); remember that this is
// an asynchronous client so some of your messages may be lingering in internal
// channels or transmission queues.
// To do this you can either keep track of the messages you've produced
// and wait for their corresponding delivery reports, or call the convenience
// function `.Flush()` that will block until all message deliveries are done
// or the provided timeout elapses.
//
// * Finally call `.Close()` to decommission the producer.
//
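// A minimal producer sketch (illustrative only; broker address and topic are
// hypothetical, and the delivery report is read from the Events channel):
//
//	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
//	if err != nil {
//	    panic(err)
//	}
//	defer p.Close()
//	topic := "example-topic"
//	p.Produce(&kafka.Message{
//	    TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
//	    Value:          []byte("hello"),
//	}, nil)
//	e := <-p.Events()
//	if m, ok := e.(*kafka.Message); ok && m.TopicPartition.Error != nil {
//	    fmt.Println("delivery failed:", m.TopicPartition.Error)
//	}
//	p.Flush(15 * 1000)
//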
// # Transactional producer API
//
// The transactional producer operates on top of the idempotent producer,
// and provides full exactly-once semantics (EOS) for Apache Kafka when used
// with the transaction aware consumer (`isolation.level=read_committed`).
//
// A producer instance is configured for transactions by setting the
// `transactional.id` to an identifier unique for the application. This
// id will be used to fence stale transactions from previous instances of
// the application, typically following an outage or crash.
//
// After creating the transactional producer instance using `NewProducer()`
// the transactional state must be initialized by calling
// `InitTransactions()`. This is a blocking call that will
// acquire a runtime producer id from the transaction coordinator broker
// as well as abort any stale transactions and fence any still running producer
// instances with the same `transactional.id`.
//
// Once transactions are initialized the application may begin a new
// transaction by calling `BeginTransaction()`.
// A producer instance may only have one single on-going transaction.
//
// Any messages produced after the transaction has been started will
// belong to the ongoing transaction and will be committed or aborted
// atomically.
// It is not permitted to produce messages outside a transaction
// boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`,
// `AbortTransaction()` or if the current transaction has failed.
//
// If consumed messages are used as input to the transaction, the consumer
// instance must be configured with `enable.auto.commit` set to `false`.
// To commit the consumed offsets along with the transaction pass the
// list of consumed partitions and the last offset processed + 1 to
// `SendOffsetsToTransaction()` prior to committing the transaction.
// This allows an aborted transaction to be restarted using the previously
// committed offsets.
//
// To commit the produced messages, and any consumed offsets, to the
// current transaction, call `CommitTransaction()`.
// This call will block until the transaction has been fully committed or
// failed (typically due to fencing by a newer producer instance).
//
// Alternatively, if processing fails, or an abortable transaction error is
// raised, the transaction needs to be aborted by calling
// `AbortTransaction()` which marks any produced messages and
// offset commits as aborted.
//
// After the current transaction has been committed or aborted a new
// transaction may be started by calling `BeginTransaction()` again.
//
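// A compressed transactional flow sketch (illustrative only; ctx, offsets and
// the consumer's group metadata are assumed to exist, and error handling is
// elided):
//
//	p, _ := kafka.NewProducer(&kafka.ConfigMap{
//	    "bootstrap.servers": "localhost:9092",
//	    "transactional.id":  "example-txn-app",
//	})
//	p.InitTransactions(ctx)
//	p.BeginTransaction()
//	p.Produce(&kafka.Message{ /* ... */ }, nil)
//	p.SendOffsetsToTransaction(ctx, offsets, consumerMetadata)
//	if err := p.CommitTransaction(ctx); err != nil {
//	    p.AbortTransaction(ctx)
//	}
//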
// Retriable errors:
// Some error cases allow the attempted operation to be retried, this is
// indicated by the error object having the retriable flag set which can
// be detected by calling `err.(kafka.Error).IsRetriable()`.
// When this flag is set the application may retry the operation immediately
// or preferably after a shorter grace period (to avoid busy-looping).
// Retriable errors include timeouts, broker transport failures, etc.
//
// Abortable errors:
// An ongoing transaction may fail permanently due to various errors,
// such as transaction coordinator becoming unavailable, write failures to the
// Apache Kafka log, under-replicated partitions, etc.
// At this point the producer application must abort the current transaction
// using `AbortTransaction()` and optionally start a new transaction
// by calling `BeginTransaction()`.
// Whether an error is abortable or not is detected by calling
// `err.(kafka.Error).TxnRequiresAbort()` on the returned error object.
//
// Fatal errors:
// While the underlying idempotent producer will typically only raise
// fatal errors for unrecoverable cluster errors where the idempotency
// guarantees can't be maintained, most of these are treated as abortable by
// the transactional producer since transactions may be aborted and retried
// in their entirety.
// The transactional producer, on the other hand, introduces a set of additional
// fatal errors which the application needs to handle by shutting down the
// producer and terminating. There is no way for a producer instance to recover
// from fatal errors.
// Whether an error is fatal or not is detected by calling
// `err.(kafka.Error).IsFatal()` on the returned error object or by checking
// the global `GetFatalError()`.
//
// Handling of other errors:
// For errors that have neither the retriable, abortable, nor fatal flag set
// it is not always obvious how to handle them. While some of these errors
// may be indicative of bugs in the application code, such as when
// an invalid parameter is passed to a method, other errors might originate
// from the broker and be passed through as-is to the application.
// The general recommendation is to treat these errors, which have
// neither the retriable nor the abortable flag set, as fatal.
//
// Error handling example:
//
// retry:
//
// err := producer.CommitTransaction(...)
// if err == nil {
// return nil
// } else if err.(kafka.Error).TxnRequiresAbort() {
// do_abort_transaction_and_reset_inputs()
// } else if err.(kafka.Error).IsRetriable() {
// goto retry
// } else { // treat all other errors as fatal errors
// panic(err)
// }
//
// # Events
//
// Apart from emitting messages and delivery reports the client also communicates
// with the application through a number of different event types.
// An application may choose to handle or ignore these events.
//
// # Consumer events
//
// * `*kafka.Message` - a fetched message.
//
// * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
// Requires `go.application.rebalance.enable`
//
// * `RevokedPartitions` - The counterpart to `AssignedPartitions` following a rebalance.
// `AssignedPartitions` and `RevokedPartitions` are symmetrical.
// Requires `go.application.rebalance.enable`
//
// * `PartitionEOF` - Consumer has reached the end of a partition.
// NOTE: The consumer will keep trying to fetch new messages for the partition.
//
// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
//
// # Producer events
//
// * `*kafka.Message` - delivery report for produced message.
// Check `.TopicPartition.Error` for delivery result.
//
// # Generic events for both Consumer and Producer
//
// * `KafkaError` - client (error codes are prefixed with _) or broker error.
// These errors are normally just informational since the
// client will try its best to automatically recover (eventually).
//
// * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required.
// This event only occurs with sasl.mechanism=OAUTHBEARER.
// Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient
// instance when a successful token retrieval is completed, otherwise be sure to
// invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or
// if setting the token failed, which could happen if an extension doesn't meet
// the required regular expression); invoking SetOAuthBearerTokenFailure() will
// schedule a new event for 10 seconds later so another retrieval can be attempted.
//
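// A sketch of an event loop (illustrative only; `c` is a consumer created with
// `go.application.rebalance.enable` set) showing how the event types listed
// above are typically distinguished:
//
//	switch e := c.Poll(100).(type) {
//	case *kafka.Message:
//	    // fetched message (or delivery report on a producer)
//	case kafka.AssignedPartitions:
//	    c.Assign(e.Partitions)
//	case kafka.RevokedPartitions:
//	    c.Unassign()
//	case kafka.Error:
//	    fmt.Println("client/broker error:", e)
//	}
//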
// Hint: If your application registers a signal notification
// (signal.Notify), make sure the signals channel is buffered to avoid
// possible complications with blocking Poll() calls.
//
// Note: The Confluent Kafka Go client is safe for concurrent use.
package kafka
import (
"fmt"
"unsafe"
// Make sure librdkafka_vendor/ sub-directory is included in vendor pulls.
_ "github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor"
)
/*
#include <stdlib.h>
#include <string.h>
#include "select_rdkafka.h"
static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
}
static const rd_kafka_group_result_t *
group_result_by_idx (const rd_kafka_group_result_t **groups, size_t cnt, size_t idx) {
if (idx >= cnt)
return NULL;
return groups[idx];
}
*/
import "C"
// PartitionAny represents any partition (for partitioning),
// or unspecified value (for all other cases)
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.
type TopicPartition struct {
Topic *string
Partition int32
Offset Offset
Metadata *string
Error error
LeaderEpoch *int32 // LeaderEpoch or nil if not available
}
func (p TopicPartition) String() string {
topic := "<null>"
if p.Topic != nil {
topic = *p.Topic
}
if p.Error != nil {
return fmt.Sprintf("%s[%d]@%s(%s)",
topic, p.Partition, p.Offset, p.Error)
}
return fmt.Sprintf("%s[%d]@%s",
topic, p.Partition, p.Offset)
}
// TopicPartitions is a slice of TopicPartitions that also implements
// the sort interface
type TopicPartitions []TopicPartition
func (tps TopicPartitions) Len() int {
return len(tps)
}
func (tps TopicPartitions) Less(i, j int) bool {
if *tps[i].Topic < *tps[j].Topic {
return true
} else if *tps[i].Topic > *tps[j].Topic {
return false
}
return tps[i].Partition < tps[j].Partition
}
func (tps TopicPartitions) Swap(i, j int) {
tps[i], tps[j] = tps[j], tps[i]
}
// Node represents a Kafka broker.
type Node struct {
// Node id.
ID int
// Node host.
Host string
// Node port.
Port int
// Node rack (may be nil)
Rack *string
}
func (n Node) String() string {
return fmt.Sprintf("[%s:%d]/%d", n.Host, n.Port, n.ID)
}
// UUID Kafka UUID representation
type UUID struct {
// Most Significant Bits.
mostSignificantBits int64
// Least Significant Bits.
leastSignificantBits int64
// Base64 representation
base64str string
}
// Base64 string representation of the UUID
func (uuid UUID) String() string {
return uuid.base64str
}
// GetMostSignificantBits returns the most significant 64 bits of the 128-bit UUID
func (uuid UUID) GetMostSignificantBits() int64 {
return uuid.mostSignificantBits
}
// GetLeastSignificantBits returns the least significant 64 bits of the 128-bit UUID
func (uuid UUID) GetLeastSignificantBits() int64 {
return uuid.leastSignificantBits
}
// ConsumerGroupTopicPartitions represents a consumer group's TopicPartitions.
type ConsumerGroupTopicPartitions struct {
// Group name
Group string
// Partitions list
Partitions []TopicPartition
}
func (gtp ConsumerGroupTopicPartitions) String() string {
res := gtp.Group
res += "[ "
for _, tp := range gtp.Partitions {
res += tp.String() + " "
}
res += "]"
return res
}
// newCPartsFromTopicPartitions creates a new C rd_kafka_topic_partition_list_t
// from a TopicPartition array.
func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) {
cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions)))
for _, part := range partitions {
ctopic := C.CString(*part.Topic)
defer C.free(unsafe.Pointer(ctopic))
rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition))
rktpar.offset = C.int64_t(part.Offset)
if part.Metadata != nil {
cmetadata := C.CString(*part.Metadata)
rktpar.metadata = unsafe.Pointer(cmetadata)
rktpar.metadata_size = C.size_t(len(*part.Metadata))
}
if part.LeaderEpoch != nil {
cLeaderEpoch := C.int32_t(*part.LeaderEpoch)
C.rd_kafka_topic_partition_set_leader_epoch(rktpar, cLeaderEpoch)
}
}
return cparts
}
func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) {
topic := C.GoString(crktpar.topic)
partition.Topic = &topic
partition.Partition = int32(crktpar.partition)
partition.Offset = Offset(crktpar.offset)
if crktpar.metadata_size > 0 {
size := C.int(crktpar.metadata_size)
cstr := (*C.char)(unsafe.Pointer(crktpar.metadata))
metadata := C.GoStringN(cstr, size)
partition.Metadata = &metadata
}
if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR {
partition.Error = newError(crktpar.err)
}
cLeaderEpoch := int32(C.rd_kafka_topic_partition_get_leader_epoch(crktpar))
if cLeaderEpoch >= 0 {
partition.LeaderEpoch = &cLeaderEpoch
}
}
func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) {
partcnt := int(cparts.cnt)
partitions = make([]TopicPartition, partcnt)
for i := 0; i < partcnt; i++ {
crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i))
setupTopicPartitionFromCrktpar(&partitions[i], crktpar)
}
return partitions
}
// cToConsumerGroupTopicPartitions converts a C rd_kafka_group_result_t array to a
// ConsumerGroupTopicPartitions slice.
func (a *AdminClient) cToConsumerGroupTopicPartitions(
cGroupResults **C.rd_kafka_group_result_t,
cGroupCount C.size_t) (result []ConsumerGroupTopicPartitions) {
result = make([]ConsumerGroupTopicPartitions, uint(cGroupCount))
for i := uint(0); i < uint(cGroupCount); i++ {
cGroupResult := C.group_result_by_idx(cGroupResults, cGroupCount, C.size_t(i))
cGroupPartitions := C.rd_kafka_group_result_partitions(cGroupResult)
result[i] = ConsumerGroupTopicPartitions{
Group: C.GoString(C.rd_kafka_group_result_name(cGroupResult)),
Partitions: newTopicPartitionsFromCparts(cGroupPartitions),
}
}
return
}
// LibraryVersion returns the underlying librdkafka library version as a
// (version_int, version_str) tuple.
func LibraryVersion() (int, string) {
ver := (int)(C.rd_kafka_version())
verstr := C.GoString(C.rd_kafka_version_str())
return ver, verstr
}
// setSaslCredentials sets the SASL credentials used for the specified Kafka client.
// The new credentials will overwrite the old ones (which were set when creating the
// client or by a previous call to setSaslCredentials). The new credentials will be
// used the next time the client needs to establish a connection to the broker. This
// function will *not* break existing broker connections that were established with the
// old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms.
func setSaslCredentials(rk *C.rd_kafka_t, username, password string) error {
cUsername := C.CString(username)
defer C.free(unsafe.Pointer(cUsername))
cPassword := C.CString(password)
defer C.free(unsafe.Pointer(cPassword))
if err := C.rd_kafka_sasl_set_credentials(rk, cUsername, cPassword); err != nil {
return newErrorFromCErrorDestroy(err)
}
return nil
}

View File

@@ -1,3 +0,0 @@
*.tar.gz
*.tgz
tmp*

View File

@@ -1,393 +0,0 @@
LICENSE
--------------------------------------------------------------
librdkafka - Apache Kafka C driver library
Copyright (c) 2012-2022, Magnus Edenhill
2023, Confluent Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
LICENSE.cjson
--------------------------------------------------------------
For cJSON.c and cJSON.h:
Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
LICENSE.crc32c
--------------------------------------------------------------
# For src/crc32c.c copied (with modifications) from
# http://stackoverflow.com/a/17646775/1821055
/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
* Copyright (C) 2013 Mark Adler
* Version 1.1 1 Aug 2013 Mark Adler
*/
/*
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler
madler@alumni.caltech.edu
*/
LICENSE.fnv1a
--------------------------------------------------------------
parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c
Please do not copyright this code. This code is in the public domain.
LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
By:
chongo <Landon Curt Noll> /\oo/\
http://www.isthe.com/chongo/
Share and Enjoy! :-)
LICENSE.hdrhistogram
--------------------------------------------------------------
This license covers src/rdhdrhistogram.c which is a C port of
Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
at revision 3a0bb77429bd3a61596f5e8a3172445844342120
-----------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE
LICENSE.lz4
--------------------------------------------------------------
src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
LZ4 Library
Copyright (c) 2011-2016, Yann Collet
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LICENSE.murmur2
--------------------------------------------------------------
parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
MurMurHash2 Library
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
LICENSE.pycrc
--------------------------------------------------------------
The following license applies to the files rdcrc32.c and rdcrc32.h which
have been generated by the pycrc tool.
============================================================================
Copyright (c) 2006-2012, Thomas Pircher <tehpeh@gmx.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
LICENSE.queue
--------------------------------------------------------------
For sys/queue.h:
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
* $FreeBSD$
LICENSE.regexp
--------------------------------------------------------------
regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
"
These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
"
LICENSE.snappy
--------------------------------------------------------------
######################################################################
# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h #
# originally retrieved from http://github.com/andikleen/snappy-c #
# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219 #
######################################################################
The snappy-c code is under the same license as the original snappy source
Copyright 2011 Intel Corporation All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LICENSE.tinycthread
--------------------------------------------------------------
From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
License
-------
Copyright (c) 2012 Marcus Geelnard
2013-2014 Evan Nemerson
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
LICENSE.wingetopt
--------------------------------------------------------------
For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
/*
* Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Sponsored in part by the Defense Advanced Research Projects
* Agency (DARPA) and Air Force Research Laboratory, Air Force
* Materiel Command, USAF, under agreement number F39502-99-1-0512.
*/
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Dieter Baron and Thomas Klausner.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/

View File

@@ -1,25 +0,0 @@
# Bundling prebuilt librdkafka
confluent-kafka-go bundles prebuilt statically linked
versions of librdkafka for the following platforms:
* MacOSX x64, arm64 (aka Darwin)
* Linux glibc x64, arm64 (Ubuntu, CentOS, etc)
* Linux musl x64, arm64 (Alpine)
* Windows x64
## Import static librdkafka bundle
First create the static librdkafka bundle following the instructions in
librdkafka's packaging/nuget/README.md.
Then import the new version by using the import.sh script here; this script
will create a branch, import the bundle, create a commit and push the
branch to GitHub for PR review. This PR must be manually opened, reviewed
and then finally merged (make sure to merge it, DO NOT squash or rebase).
$ ./import.sh ~/path/to/librdkafka-static-bundle-v1.4.0.tgz
This will copy the static library and the rdkafka.h header file
to this directory, as well as generate a new ../build_..go file
for this platform + variant.

View File

@@ -1,121 +0,0 @@
#!/bin/bash
#
# Updates the bundled prebuilt librdkafka libraries to specified version.
#
set -e
usage() {
echo "Usage: $0 librdkafka-static-bundle-<VERSION>.tgz"
echo ""
echo "This tool must be run from the TOPDIR/kafka/librdkafka_vendor directory"
exit 1
}
# Parse dynamic libraries from linker command line.
# Will print a list matching -lfoo and -framework X..
parse_dynlibs() {
local libs=
while [[ $# -gt 0 ]]; do
if [[ $1 == -l* ]]; then
libs="${libs} $1"
elif [[ $1 == -framework ]]; then
libs="${libs} $1 $2"
shift # remove one (extra) arg
fi
shift # remove one arg
done
echo "$libs"
}
# Parse dynamic library dependencies from pkg-config file and print
# them to stdout.
parse_pc_dynlibs() {
local pc=$1
parse_dynlibs $(sed -n 's/^Libs: \(..*\)/\1/p' "$pc")
}
setup_build() {
# Copies static library from the temp directory into final location,
# extracts dynamic lib list from the pkg-config file,
# and generates the build_..go file
local btype=$1
local apath=$2
local pc=$3
local srcinfo=$4
local build_tag=
local gpath="../build_${btype}.go"
local dpath="librdkafka_${btype}.a"
if [[ $btype =~ ^glibc_linux.*$ ]]; then
build_tag="// +build !musl"
elif [[ $btype =~ ^musl_linux.*$ ]]; then
build_tag="// +build musl"
fi
local dynlibs=$(parse_pc_dynlibs $pc)
echo "Copying $apath to $dpath"
cp "$apath" "$dpath"
echo "Generating $gpath (extra build tag: $build_tag)"
cat >$gpath <<EOF
// +build !dynamic
$build_tag
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: \${SRCDIR}/librdkafka_vendor/${dpath} $dynlibs
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static ${btype} from ${srcinfo}"
EOF
git add "$dpath" "$gpath"
}
bundle="$1"
[[ -f $bundle ]] || usage
bundlename=$(basename "$bundle")
bdir=$(mktemp -d tmpXXXXXX)
echo "Extracting bundle $bundle:"
tar -xzvf "$bundle" -C "$bdir/"
echo "Copying librdkafka files"
for f in rdkafka.h LICENSES.txt ; do
cp $bdir/$f . || true
git add "$f"
done
for btype in glibc_linux_amd64 \
glibc_linux_arm64 \
musl_linux_amd64 \
musl_linux_arm64 \
darwin_amd64 \
darwin_arm64 \
windows ; do
lib=$bdir/librdkafka_${btype}.a
pc=${lib/%.a/.pc}
[[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1)
[[ -f $pc ]] || (echo "Expected file $pc missing" ; exit 1)
setup_build $btype $lib $pc $bundlename
done
rm -rf "$bdir"
echo "All done"

View File

@@ -1,113 +0,0 @@
#!/bin/bash
#
#
# Import a new version of librdkafka based on a librdkafka static bundle.
# This will create a separate branch, import librdkafka, make a commit,
# and then ask you to push the branch to github, have it reviewed,
# and then later merged (NOT squashed or rebased).
# Having a merge per import allows future shallow clones to skip and ignore
# older imports, hopefully reducing the amount of git history data 'go get'
# needs to download.
set -e
usage() {
echo "Usage: $0 [--devel] path/to/librdkafka-static-bundle-<VERSION>.tgz"
echo ""
echo "This tool must be run from the TOPDIR/kafka/librdkafka directory"
echo ""
echo "Options:"
echo " --devel - Development use: No branch checks and does not push to github"
exit 1
}
error_cleanup() {
echo "Error occurred, cleaning up"
git checkout $curr_branch
git branch -D $import_branch
exit 1
}
devel=0
if [[ $1 == --devel ]]; then
devel=1
shift
fi
bundle="$1"
[[ -f $bundle ]] || usage
# Parse the librdkafka version from the bundle
bundlename=$(basename $bundle)
version=${bundlename#librdkafka-static-bundle-}
version=${version%.tgz}
if [[ -z $version ]]; then
echo "Error: Could not parse version from bundle $bundle"
exit 1
fi
# Verify branch state
curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-)
uncommitted=$(git status --untracked-files=no --porcelain)
if [[ ! -z $uncommitted ]]; then
echo "Error: This script must be run on a clean branch with no uncommitted changes"
echo "Uncommitted files:"
echo "$uncommitted"
exit 1
fi
if [[ $devel != 1 ]] && [[ $curr_branch != master ]] ; then
echo "Error: This script must be run on an up-to-date, clean, master branch"
exit 1
fi
# Create import branch, import bundle, commit.
import_branch="import_$version"
exists=$(git branch -rlq | grep "/$import_branch\$" || true)
if [[ ! -z $exists ]]; then
echo "Error: This version branch already seems to exist: $exists: already imorted?"
[[ $devel != 1 ]] && exit 1
fi
echo "Checking for existing commits that match this version (should be none)"
git log --oneline | grep "^librdkafka static bundle $version\$" && exit 1
echo "Creating import branch $import_branch"
git checkout -b $import_branch
echo "Importing bundle $bundle"
./bundle-import.sh "$bundle" || error_cleanup
echo "Committing $version"
git commit -a -m "librdkafka static bundle $version" || error_cleanup
echo "Updating error codes and docs"
pushd ../../
make -f mk/Makefile docs || error_cleanup
git commit -a -m "Documentation and error code update for librdkafka $version" \
|| error_cleanup
popd
if [[ $devel != 1 ]]; then
echo "Pushing branch"
git push origin $import_branch || error_cleanup
fi
git checkout $curr_branch
if [[ $devel != 1 ]]; then
git branch -D $import_branch
fi
echo ""
echo "############## IMPORT OF $version COMPLETE ##############"
if [[ $devel != 1 ]]; then
echo "Branch $import_branch has been pushed."
echo "Create a PR, have it reviewed and then merge it (do NOT squash or rebase)."
fi

View File

@@ -1,21 +0,0 @@
/**
* Copyright 2020 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package librdkafka
// LibrdkafkaGoSubdir is a dummy variable needed to export something so the
// file is not empty.
var LibrdkafkaGoSubdir = true

File diff suppressed because it is too large

View File

@@ -1,331 +0,0 @@
/*
* librdkafka - Apache Kafka C library
*
* Copyright (c) 2019 Magnus Edenhill
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RDKAFKA_MOCK_H_
#define _RDKAFKA_MOCK_H_
#ifndef _RDKAFKA_H_
#error "rdkafka_mock.h must be included after rdkafka.h"
#endif
#ifdef __cplusplus
extern "C" {
#if 0
} /* Restore indent */
#endif
#endif
/**
* @name Mock cluster
*
* Provides a mock Kafka cluster with a configurable number of brokers
* that support a reasonable subset of Kafka protocol operations,
* error injection, etc.
*
* There are two ways to use the mock clusters; the simplest approach
* is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
* in an existing application, which will replace the configured
* `bootstrap.servers` with the mock cluster brokers.
* This approach makes it convenient to test existing applications.
*
* The second approach is to explicitly create a mock cluster on an
* rd_kafka_t instance by using rd_kafka_mock_cluster_new().
*
* Mock clusters provide localhost listeners that can be used as the bootstrap
* servers by multiple rd_kafka_t instances.
*
* Currently supported functionality:
* - Producer
* - Idempotent Producer
* - Transactional Producer
* - Low-level consumer
* - High-level balanced consumer groups with offset commits
* - Topic Metadata and auto creation
*
* @remark High-level consumers making use of the balanced consumer groups
* are not supported.
*
* @remark This is an experimental public API that is NOT covered by the
* librdkafka API or ABI stability guarantees.
*
*
* @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
*
* @{
*/
typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
/**
* @brief Create new mock cluster with \p broker_cnt brokers.
*
* The broker ids will start at 1 up to and including \p broker_cnt.
*
* The \p rk instance is required for internal bookkeeping but continues
* to operate as usual.
*/
RD_EXPORT
rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk,
int broker_cnt);
/**
* @brief Destroy mock cluster.
*/
RD_EXPORT
void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster);
/**
* @returns the rd_kafka_t instance for a cluster as passed to
* rd_kafka_mock_cluster_new().
*/
RD_EXPORT rd_kafka_t *
rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster);
/**
* @returns the rd_kafka_mock_cluster_t instance as created by
* setting the `test.mock.num.brokers` configuration property,
* or NULL if no such instance.
*/
RD_EXPORT rd_kafka_mock_cluster_t *
rd_kafka_handle_mock_cluster (const rd_kafka_t *rk);
/**
* @returns the mock cluster's bootstrap.servers list
*/
RD_EXPORT const char *
rd_kafka_mock_cluster_bootstraps (const rd_kafka_mock_cluster_t *mcluster);
/**
* @brief Clear the cluster's error state for the given \p ApiKey.
*/
RD_EXPORT
void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey);
/**
* @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
* error stack for the given \p ApiKey.
*
* \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
*
* The following \p cnt protocol requests matching \p ApiKey will fail with the
* provided error code and be removed from the stack, starting with
* the first error code, then the second, etc.
*
* Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
* disconnect the client which can be useful to trigger a disconnect on certain
* requests.
*/
RD_EXPORT
void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey, size_t cnt, ...);
/**
* @brief Same as rd_kafka_mock_push_request_errors() but takes
* an array of errors.
*/
RD_EXPORT void
rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey,
size_t cnt,
const rd_kafka_resp_err_t *errors);
/**
* @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
* the broker's error stack for the given \p ApiKey.
*
* \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
*
* Each entry is a tuple of:
* rd_kafka_resp_err_t err - error to return (or 0)
* int rtt_ms - response RTT/delay in milliseconds (or 0)
*
* The following \p cnt protocol requests matching \p ApiKey will fail with the
* provided error code and be removed from the stack, starting with
* the first error code, then the second, etc.
*
* @remark The broker errors take precedence over the cluster errors.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id,
int16_t ApiKey, size_t cnt, ...);
/**
* @brief Set the topic error to return in protocol requests.
*
* Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
*/
RD_EXPORT
void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster,
const char *topic,
rd_kafka_resp_err_t err);
/**
* @brief Creates a topic.
*
* This is an alternative to automatic topic creation as performed by
* the client itself.
*
* @remark The Topic Admin API (CreateTopics) is not supported by the
* mock broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster,
const char *topic, int partition_cnt,
int replication_factor);
/**
* @brief Sets the partition leader.
*
* The topic will be created if it does not exist.
*
* \p broker_id needs to be an existing broker, or -1 to make the
* partition leader-less.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster,
const char *topic, int32_t partition,
int32_t broker_id);
/**
* @brief Sets the partition's preferred replica / follower.
*
* The topic will be created if it does not exist.
*
* \p broker_id does not need to point to an existing broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster,
const char *topic, int32_t partition,
int32_t broker_id);
/**
* @brief Sets the partition's preferred replica / follower low and high
* watermarks.
*
* The topic will be created if it does not exist.
*
* Setting an offset to -1 will revert back to the leader's corresponding
* watermark.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster,
const char *topic,
int32_t partition,
int64_t lo, int64_t hi);
/**
* @brief Disconnects the broker and disallows any new connections.
* This does NOT trigger leader change.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_down (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id);
/**
* @brief Makes the broker accept connections again.
* This does NOT trigger leader change.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_up (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id);
/**
* @brief Set broker round-trip-time delay in milliseconds.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rtt (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id, int rtt_ms);
/**
* @brief Sets the broker's rack as reported in Metadata to the client.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id, const char *rack);
/**
* @brief Explicitly sets the coordinator. If this API is not used, a standard
* hashing scheme will be used.
*
* @param key_type "transaction" or "group"
* @param key The transactional.id or group.id
* @param broker_id The new coordinator, does not have to be a valid broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster,
const char *key_type, const char *key,
int32_t broker_id);
/**
* @brief Set the allowed ApiVersion range for \p ApiKey.
*
* Set \p MinVersion and \p MaxVersion to -1 to disable the API
* completely.
*
* \p MaxVersion MUST not exceed the maximum implemented value,
* see rdkafka_mock_handlers.c.
*
* @param ApiKey Protocol request type/key
* @param MinVersion Minimum version supported (or -1 to disable).
* @param MaxVersion Maximum version supported (or -1 to disable).
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_set_apiversion (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey,
int16_t MinVersion, int16_t MaxVersion);
/**@}*/
#ifdef __cplusplus
}
#endif
#endif /* _RDKAFKA_MOCK_H_ */
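The header above describes two ways to stand up a mock cluster. A minimal Go sketch of the first approach, setting `test.mock.num.brokers` on an ordinary client, might look like the following, assuming the confluent-kafka-go v2 bindings that this revert removes; the broker count and topic name are illustrative.

package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// test.mock.num.brokers replaces bootstrap.servers with an in-process mock cluster.
	p, err := kafka.NewProducer(&kafka.ConfigMap{"test.mock.num.brokers": 3})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	topic := "mock-topic" // illustrative topic name
	deliveryChan := make(chan kafka.Event, 1)
	if err := p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("hello mock cluster"),
	}, deliveryChan); err != nil {
		panic(err)
	}
	fmt.Println(<-deliveryChan) // delivery report served by the mock broker
}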

View File

@@ -1,89 +0,0 @@
package kafka
import (
"fmt"
"time"
)
/*
#include "select_rdkafka.h"
*/
import "C"
// LogEvent represent the log from librdkafka internal log queue
type LogEvent struct {
Name string // Name of client instance
Tag string // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
Message string // Log message
Level int // Log syslog level, lower is more critical.
Timestamp time.Time // Log timestamp
}
// newLogEvent creates a new LogEvent from the given rd_kafka_event_t.
//
// This function does not take ownership of the cEvent pointer. You need to
// free its resources using C.rd_kafka_event_destroy afterwards.
//
// The cEvent object needs to be of type C.RD_KAFKA_EVENT_LOG. Calling this
// function with an object of another type has undefined behaviour.
func (h *handle) newLogEvent(cEvent *C.rd_kafka_event_t) LogEvent {
var tag, message *C.char
var level C.int
C.rd_kafka_event_log(cEvent, &(tag), &(message), &(level))
return LogEvent{
Name: h.name,
Tag: C.GoString(tag),
Message: C.GoString(message),
Level: int(level),
Timestamp: time.Now(),
}
}
// pollLogEvents polls log events from librdkafka and pushes them to toChannel,
// until doneChan is closed.
//
// Each call to librdkafka times out after timeoutMs. If a call to librdkafka
// is ongoing when doneChan is closed, the function will wait until the call
// returns or times out, whichever happens first.
func (h *handle) pollLogEvents(toChannel chan LogEvent, timeoutMs int, doneChan chan bool) {
for {
select {
case <-doneChan:
return
default:
cEvent := C.rd_kafka_queue_poll(h.logq, C.int(timeoutMs))
if cEvent == nil {
continue
}
if C.rd_kafka_event_type(cEvent) != C.RD_KAFKA_EVENT_LOG {
C.rd_kafka_event_destroy(cEvent)
continue
}
logEvent := h.newLogEvent(cEvent)
C.rd_kafka_event_destroy(cEvent)
select {
case <-doneChan:
return
case toChannel <- logEvent:
continue
}
}
}
}
func (logEvent LogEvent) String() string {
return fmt.Sprintf(
"[%v][%s][%s][%d]%s",
logEvent.Timestamp.Format(time.RFC3339),
logEvent.Name,
logEvent.Tag,
logEvent.Level,
logEvent.Message)
}
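The poller above feeds the client's log channel. A hedged sketch of consuming it, assuming the confluent-kafka-go v2 producer API and its go.logs.channel.enable property; the configuration values here are illustrative.

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func watchLogs() error {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"test.mock.num.brokers":  3,    // illustrative: in-process mock cluster
		"go.logs.channel.enable": true, // forward librdkafka logs to p.Logs()
		"debug":                  "broker",
	})
	if err != nil {
		return err
	}
	defer p.Close()

	go func() {
		for logEvent := range p.Logs() {
			fmt.Println(logEvent) // LogEvent.String(): [timestamp][name][tag][level]message
		}
	}()
	// ... produce as usual ...
	return nil
}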

View File

@@ -1,229 +0,0 @@
package kafka
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"fmt"
"time"
"unsafe"
)
/*
#include <string.h>
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
void setup_rkmessage (rd_kafka_message_t *rkmessage,
rd_kafka_topic_t *rkt, int32_t partition,
const void *payload, size_t len,
void *key, size_t keyLen, void *opaque) {
rkmessage->rkt = rkt;
rkmessage->partition = partition;
rkmessage->payload = (void *)payload;
rkmessage->len = len;
rkmessage->key = (void *)key;
rkmessage->key_len = keyLen;
rkmessage->_private = opaque;
}
*/
import "C"
// TimestampType is the Message timestamp type or source
type TimestampType int
const (
// TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
// TimestampCreateTime indicates timestamp set by producer (source time)
TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
// TimestampLogAppendTime indicates timestamp set by broker (store time)
TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
)
func (t TimestampType) String() string {
switch t {
case TimestampCreateTime:
return "CreateTime"
case TimestampLogAppendTime:
return "LogAppendTime"
case TimestampNotAvailable:
fallthrough
default:
return "NotAvailable"
}
}
// Message represents a Kafka message
type Message struct {
TopicPartition TopicPartition
Value []byte
Key []byte
Timestamp time.Time
TimestampType TimestampType
Opaque interface{}
Headers []Header
LeaderEpoch *int32 // Deprecated: LeaderEpoch or nil if not available. Use m.TopicPartition.LeaderEpoch instead.
}
// String returns a human readable representation of a Message.
// Key and payload are not represented.
func (m *Message) String() string {
var topic string
if m.TopicPartition.Topic != nil {
topic = *m.TopicPartition.Topic
} else {
topic = ""
}
return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset)
}
func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) {
if msg.TopicPartition.Topic == nil {
return nil
}
return h.getRkt(*msg.TopicPartition.Topic)
}
// setupHeadersFromGlueMsg converts the C tmp headers in gMsg to
// Go Headers in msg.
// gMsg.tmphdrs will be freed.
func setupHeadersFromGlueMsg(msg *Message, gMsg *C.glue_msg_t) {
msg.Headers = make([]Header, gMsg.tmphdrsCnt)
for n := range msg.Headers {
tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(gMsg.tmphdrs))[n]
msg.Headers[n].Key = C.GoString(tmphdr.key)
if tmphdr.val != nil {
msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size))
} else {
msg.Headers[n].Value = nil
}
}
C.free(unsafe.Pointer(gMsg.tmphdrs))
}
func (h *handle) newMessageFromGlueMsg(gMsg *C.glue_msg_t) (msg *Message) {
msg = &Message{}
if gMsg.ts != -1 {
ts := int64(gMsg.ts)
msg.TimestampType = TimestampType(gMsg.tstype)
msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000)
}
if gMsg.tmphdrsCnt > 0 {
setupHeadersFromGlueMsg(msg, gMsg)
}
h.setupMessageFromC(msg, gMsg.msg)
return msg
}
// setupMessageFromC sets up a message object from a C rd_kafka_message_t
func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) {
if cmsg.rkt != nil {
topic := h.getTopicNameFromRkt(cmsg.rkt)
msg.TopicPartition.Topic = &topic
}
msg.TopicPartition.Partition = int32(cmsg.partition)
if cmsg.payload != nil && h.msgFields.Value {
msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len))
}
if cmsg.key != nil && h.msgFields.Key {
msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len))
}
if h.msgFields.Headers {
var gMsg C.glue_msg_t
gMsg.msg = cmsg
gMsg.want_hdrs = C.int8_t(1)
chdrsToTmphdrs(&gMsg)
if gMsg.tmphdrsCnt > 0 {
setupHeadersFromGlueMsg(msg, &gMsg)
}
}
msg.TopicPartition.Offset = Offset(cmsg.offset)
if cmsg.err != 0 {
msg.TopicPartition.Error = newError(cmsg.err)
}
leaderEpoch := int32(C.rd_kafka_message_leader_epoch(cmsg))
if leaderEpoch >= 0 {
msg.LeaderEpoch = &leaderEpoch
msg.TopicPartition.LeaderEpoch = &leaderEpoch
}
}
// newMessageFromC creates a new message object from a C rd_kafka_message_t
// NOTE: For use with Producer: does not set message timestamp fields.
func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) {
msg = &Message{}
h.setupMessageFromC(msg, cmsg)
return msg
}
// messageToC sets up cmsg as a clone of msg
func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) {
var valp unsafe.Pointer
var keyp unsafe.Pointer
// to circumvent Cgo constraints we need to allocate C heap memory
// for both Value and Key (one allocation back to back)
// and copy the bytes from Value and Key to the C memory.
// We later tell librdkafka (in produce()) to free the
// C memory pointer when it is done.
var payload unsafe.Pointer
valueLen := 0
keyLen := 0
if msg.Value != nil {
valueLen = len(msg.Value)
}
if msg.Key != nil {
keyLen = len(msg.Key)
}
allocLen := valueLen + keyLen
if allocLen > 0 {
payload = C.malloc(C.size_t(allocLen))
if valueLen > 0 {
copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value)
valp = payload
}
if keyLen > 0 {
copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key)
keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen]))
}
}
cmsg.rkt = h.getRktFromMessage(msg)
cmsg.partition = C.int32_t(msg.TopicPartition.Partition)
cmsg.payload = valp
cmsg.len = C.size_t(valueLen)
cmsg.key = keyp
cmsg.key_len = C.size_t(keyLen)
cmsg._private = nil
}
// used for testing messageToC performance
func (h *handle) messageToCDummy(msg *Message) {
var cmsg C.rd_kafka_message_t
h.messageToC(msg, &cmsg)
}
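These helpers shuttle messages between Go and C. On the application side, the fields they populate can be inspected on a consumed message roughly as follows; a sketch assuming an already-configured *kafka.Consumer, with an illustrative timeout.

import (
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func dumpNext(c *kafka.Consumer) error {
	msg, err := c.ReadMessage(5 * time.Second) // illustrative timeout
	if err != nil {
		return err
	}
	// msg.String() renders topic[partition]@offset, as defined above.
	fmt.Printf("%s key=%q %s=%s\n", msg, msg.Key, msg.TimestampType, msg.Timestamp.Format(time.RFC3339))
	for _, h := range msg.Headers {
		fmt.Printf("  header %s=%q\n", h.Key, h.Value)
	}
	if msg.TopicPartition.LeaderEpoch != nil {
		fmt.Printf("  leader epoch %d\n", *msg.TopicPartition.LeaderEpoch)
	}
	return nil
}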

View File

@@ -1,180 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"unsafe"
)
/*
#include <stdlib.h>
#include "select_rdkafka.h"
struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) {
return &m->brokers[i];
}
struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) {
return &m->topics[i];
}
struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) {
return &m->topics[topic_idx].partitions[partition_idx];
}
int32_t _get_int32_element (int32_t *arr, int i) {
return arr[i];
}
*/
import "C"
// BrokerMetadata contains per-broker metadata
type BrokerMetadata struct {
ID int32
Host string
Port int
}
// PartitionMetadata contains per-partition metadata
type PartitionMetadata struct {
ID int32
Error Error
Leader int32
Replicas []int32
Isrs []int32
}
// TopicMetadata contains per-topic metadata
type TopicMetadata struct {
Topic string
Partitions []PartitionMetadata
Error Error
}
// Metadata contains broker and topic metadata for all (matching) topics
type Metadata struct {
Brokers []BrokerMetadata
Topics map[string]TopicMetadata
OriginatingBroker BrokerMetadata
}
// getMetadata queries broker for cluster and topic metadata.
// If topic is non-nil only information about that topic is returned, else if
// allTopics is false only information about locally used topics is returned,
// else information about all topics is returned.
func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
h := H.gethandle()
var rkt *C.rd_kafka_topic_t
if topic != nil {
rkt = h.getRkt(*topic)
}
var cMd *C.struct_rd_kafka_metadata
cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics),
rkt, &cMd, C.int(timeoutMs))
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return nil, newError(cErr)
}
m := Metadata{}
defer C.rd_kafka_metadata_destroy(cMd)
m.Brokers = make([]BrokerMetadata, cMd.broker_cnt)
for i := 0; i < int(cMd.broker_cnt); i++ {
b := C._getMetadata_broker_element(cMd, C.int(i))
m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host),
int(b.port)}
}
m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt))
for i := 0; i < int(cMd.topic_cnt); i++ {
t := C._getMetadata_topic_element(cMd, C.int(i))
thisTopic := C.GoString(t.topic)
m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic,
Error: newError(t.err),
Partitions: make([]PartitionMetadata, int(t.partition_cnt))}
for j := 0; j < int(t.partition_cnt); j++ {
p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j))
m.Topics[thisTopic].Partitions[j] = PartitionMetadata{
ID: int32(p.id),
Error: newError(p.err),
Leader: int32(p.leader)}
m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt))
for ir := 0; ir < int(p.replica_cnt); ir++ {
m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir)))
}
m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt))
for ii := 0; ii < int(p.isr_cnt); ii++ {
m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii)))
}
}
}
m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id),
C.GoString(cMd.orig_broker_name), 0}
return &m, nil
}
// queryWatermarkOffsets returns the broker's low and high offsets for the given topic
// and partition.
func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) {
h := H.gethandle()
ctopic := C.CString(topic)
defer C.free(unsafe.Pointer(ctopic))
var cLow, cHigh C.int64_t
e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
&cLow, &cHigh, C.int(timeoutMs))
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return 0, 0, newError(e)
}
low = int64(cLow)
high = int64(cHigh)
return low, high, nil
}
// getWatermarkOffsets returns the clients cached low and high offsets for the given topic
// and partition.
func getWatermarkOffsets(H Handle, topic string, partition int32) (low, high int64, err error) {
h := H.gethandle()
ctopic := C.CString(topic)
defer C.free(unsafe.Pointer(ctopic))
var cLow, cHigh C.int64_t
e := C.rd_kafka_get_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
&cLow, &cHigh)
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return 0, 0, newError(e)
}
low = int64(cLow)
high = int64(cHigh)
return low, high, nil
}
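getMetadata, queryWatermarkOffsets and getWatermarkOffsets back the exported GetMetadata and QueryWatermarkOffsets methods on the client handles. A hedged usage sketch; the topic name and timeouts are illustrative.

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func printOffsets(c *kafka.Consumer, topic string) error {
	md, err := c.GetMetadata(&topic, false, 5000) // 5s timeout, single topic
	if err != nil {
		return err
	}
	for _, t := range md.Topics {
		for _, p := range t.Partitions {
			low, high, err := c.QueryWatermarkOffsets(t.Topic, p.ID, 5000)
			if err != nil {
				return err
			}
			fmt.Printf("%s[%d] leader=%d offsets=[%d,%d)\n", t.Topic, p.ID, p.Leader, low, high)
		}
	}
	return nil
}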

View File

@@ -1,35 +0,0 @@
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import "C"
// bool2int converts a bool to a C.int (1 or 0)
func bool2cint(b bool) C.int {
if b {
return 1
}
return 0
}
// cint2bool converts a C.int to a bool
func cint2bool(v C.int) bool {
if v == 0 {
return false
}
return true
}

View File

@@ -1,134 +0,0 @@
/**
* Copyright 2023 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"time"
"unsafe"
)
/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"
// MockCluster represents a Kafka mock cluster instance which can be used
// for testing.
type MockCluster struct {
rk *C.rd_kafka_t
mcluster *C.rd_kafka_mock_cluster_t
}
// NewMockCluster provides a mock Kafka cluster with a configurable
// number of brokers that support a reasonable subset of Kafka protocol
// operations, error injection, etc.
//
// The broker ids will start at 1 up to and including brokerCount.
//
// Mock clusters provide localhost listeners that can be used as the bootstrap
// servers by multiple Kafka client instances.
//
// Currently supported functionality:
// - Producer
// - Idempotent Producer
// - Transactional Producer
// - Low-level consumer
// - High-level balanced consumer groups with offset commits
// - Topic Metadata and auto creation
//
// Warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
func NewMockCluster(brokerCount int) (*MockCluster, error) {
mc := &MockCluster{}
cErrstr := (*C.char)(C.malloc(C.size_t(512)))
defer C.free(unsafe.Pointer(cErrstr))
cConf := C.rd_kafka_conf_new()
mc.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
if mc.rk == nil {
C.rd_kafka_conf_destroy(cConf)
return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
}
mc.mcluster = C.rd_kafka_mock_cluster_new(mc.rk, C.int(brokerCount))
if mc.mcluster == nil {
C.rd_kafka_destroy(mc.rk)
return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
}
return mc, nil
}
// BootstrapServers returns the bootstrap.servers property for this MockCluster
func (mc *MockCluster) BootstrapServers() string {
return C.GoString(C.rd_kafka_mock_cluster_bootstraps(mc.mcluster))
}
// SetRoundtripDuration sets the broker round-trip-time delay for the given broker.
// Use brokerID -1 for all brokers, or >= 0 for a specific broker.
func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error {
durationInMillis := C.int(duration.Milliseconds())
cError := C.rd_kafka_mock_broker_set_rtt(mc.mcluster, C.int(brokerID), durationInMillis)
if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return newError(cError)
}
return nil
}
// SetBrokerDown disconnects the broker and disallows any new connections.
// This does NOT trigger leader change.
// Use brokerID -1 for all brokers, or >= 0 for a specific broker.
func (mc *MockCluster) SetBrokerDown(brokerID int) error {
cError := C.rd_kafka_mock_broker_set_down(mc.mcluster, C.int(brokerID))
if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return newError(cError)
}
return nil
}
// SetBrokerUp makes the broker accept connections again.
// This does NOT trigger leader change.
// Use brokerID -1 for all brokers, or >= 0 for a specific broker.
func (mc *MockCluster) SetBrokerUp(brokerID int) error {
cError := C.rd_kafka_mock_broker_set_up(mc.mcluster, C.int(brokerID))
if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return newError(cError)
}
return nil
}
// CreateTopic creates a topic without having to use a producer
func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error {
topicStr := C.CString(topic)
defer C.free(unsafe.Pointer(topicStr))
cError := C.rd_kafka_mock_topic_create(mc.mcluster, topicStr, C.int(partitions), C.int(replicationFactor))
if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return newError(cError)
}
return nil
}
// Close and destroy the MockCluster
func (mc *MockCluster) Close() {
C.rd_kafka_mock_cluster_destroy(mc.mcluster)
C.rd_kafka_destroy(mc.rk)
}
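A short sketch of the MockCluster wrapper defined above, wiring its listeners into a regular producer; the broker count, topic name and replication settings are illustrative.

import "github.com/confluentinc/confluent-kafka-go/v2/kafka"

func newMockProducer() (*kafka.MockCluster, *kafka.Producer, error) {
	mc, err := kafka.NewMockCluster(3)
	if err != nil {
		return nil, nil, err
	}
	if err := mc.CreateTopic("mock-topic", 4, 3); err != nil {
		mc.Close()
		return nil, nil, err
	}
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": mc.BootstrapServers(), // localhost listeners of the mock cluster
	})
	if err != nil {
		mc.Close()
		return nil, nil, err
	}
	return mc, p, nil // callers should Close() both when done
}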

View File

@@ -1,147 +0,0 @@
/**
* Copyright 2017 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"fmt"
"strconv"
)
/*
#include <stdlib.h>
#include "select_rdkafka.h"
static int64_t _c_rdkafka_offset_tail(int64_t rel) {
return RD_KAFKA_OFFSET_TAIL(rel);
}
*/
import "C"
// Offset type (int64) with support for canonical names
type Offset int64
// OffsetBeginning represents the earliest offset (logical)
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
// OffsetEnd represents the latest offset (logical)
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
// OffsetInvalid represents an invalid/unspecified offset
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
// OffsetStored represents a stored offset
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
func (o Offset) String() string {
switch o {
case OffsetBeginning:
return "beginning"
case OffsetEnd:
return "end"
case OffsetInvalid:
return "unset"
case OffsetStored:
return "stored"
default:
return fmt.Sprintf("%d", int64(o))
}
}
// Set offset value, see NewOffset()
func (o *Offset) Set(offset interface{}) error {
n, err := NewOffset(offset)
if err == nil {
*o = n
}
return err
}
// NewOffset creates a new Offset using the provided logical string, an
// absolute int64 offset value, or a concrete Offset type.
// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
func NewOffset(offset interface{}) (Offset, error) {
switch v := offset.(type) {
case string:
switch v {
case "beginning":
fallthrough
case "earliest":
return Offset(OffsetBeginning), nil
case "end":
fallthrough
case "latest":
return Offset(OffsetEnd), nil
case "unset":
fallthrough
case "invalid":
return Offset(OffsetInvalid), nil
case "stored":
return Offset(OffsetStored), nil
default:
off, err := strconv.Atoi(v)
return Offset(off), err
}
case int:
return Offset((int64)(v)), nil
case int64:
return Offset(v), nil
case Offset:
return Offset(v), nil
default:
return OffsetInvalid, newErrorFromString(ErrInvalidArg,
fmt.Sprintf("Invalid offset type: %t", v))
}
}
// OffsetTail returns the logical offset relativeOffset from current end of partition
func OffsetTail(relativeOffset Offset) Offset {
return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset)))
}
// offsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked up offsets are represented as `.Offset` in the returned
// `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
cparts := newCPartsFromTopicPartitions(times)
defer C.rd_kafka_topic_partition_list_destroy(cparts)
cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs))
if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
return nil, newError(cerr)
}
return newTopicPartitionsFromCparts(cparts), nil
}
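A few illustrative uses of the Offset helpers above; this sketch assumes the package is imported as kafka and the usual fmt import.

start, err := kafka.NewOffset("earliest") // logical name, same as kafka.OffsetBeginning
if err != nil {
	panic(err)
}
fmt.Println(start) // "beginning"

abs, _ := kafka.NewOffset(int64(42)) // absolute offset
fmt.Println(abs)                     // "42"

tail := kafka.OffsetTail(10) // 10 messages before the current end of the partition
fmt.Println(tail)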

File diff suppressed because it is too large

View File

@@ -1,31 +0,0 @@
/**
* Copyright 2020 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file uses a preprocessor macro defined by the various build_*.go
// files to determine whether to import the bundled librdkafka header, or
// the system one.
// This is needed because cgo will automatically add -I. to the include
// path, so <librdkafka/rdkafka.h> would find a bundled header instead of
// the system one if it were called librdkafka/rdkafka.h instead of
// librdkafka_vendor/rdkafka.h
#ifdef USE_VENDORED_LIBRDKAFKA
#include "librdkafka_vendor/rdkafka.h"
#include "librdkafka_vendor/rdkafka_mock.h"
#else
#include <librdkafka/rdkafka.h>
#include <librdkafka/rdkafka_mock.h>
#endif

View File

@@ -1,12 +0,0 @@
{
"Brokers": "mybroker or $BROKERS env",
"BrokersSasl": "mybroker or $BROKERSSASL env",
"SaslUsername": "testuser",
"SaslPassword": "testpass",
"SaslMechanism": "PLAIN",
"TopicName": "test",
"GroupID": "testgroup",
"PerfMsgCount": 1000000,
"PerfMsgSize": 100,
"Config": ["api.version.request=true"]
}

View File

@@ -1,55 +0,0 @@
/**
* Copyright 2019 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import "C"
import (
"context"
"time"
)
const (
cTimeoutInfinite = C.int(-1) // Blocks indefinitely until completion.
cTimeoutNoWait = C.int(0) // Returns immediately without blocking.
)
// cTimeoutFromContext returns the remaining time after which work done on behalf of this context
// should be canceled, in milliseconds.
//
// If no deadline/timeout is set, or if the timeout does not fit in an int32, it returns
// cTimeoutInfinite;
// If there is no time left in this context, it returns cTimeoutNoWait.
func cTimeoutFromContext(ctx context.Context) C.int {
if ctx == nil {
return cTimeoutInfinite
}
timeout, hasTimeout := timeout(ctx)
if !hasTimeout {
return cTimeoutInfinite
}
if timeout <= 0 {
return cTimeoutNoWait
}
timeoutMs := int64(timeout / time.Millisecond)
if int64(int32(timeoutMs)) < timeoutMs {
return cTimeoutInfinite
}
return C.int(timeoutMs)
}
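cTimeoutFromContext is internal, but callers see its effect whenever they pass a context with a deadline to one of the blocking APIs. A hedged sketch using the transactional producer calls, which accept a context; the transactional.id requirement and the 30s deadline are illustrative.

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func commitBatch(p *kafka.Producer) error {
	// The 30s deadline below is translated into a librdkafka timeout in milliseconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := p.InitTransactions(ctx); err != nil { // requires transactional.id in the producer config
		return err
	}
	if err := p.BeginTransaction(); err != nil {
		return err
	}
	// ... produce messages ...
	return p.CommitTransaction(ctx)
}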

16
vendor/modules.txt vendored
View File

@@ -35,13 +35,10 @@ github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991
## explicit; go 1.18
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2
# github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995
## explicit; go 1.18
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2
# github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf
# github.com/cloudevents/sdk-go/v2 v2.15.2
## explicit; go 1.18
github.com/cloudevents/sdk-go/v2
github.com/cloudevents/sdk-go/v2/binding
@@ -57,10 +54,6 @@ github.com/cloudevents/sdk-go/v2/event/datacodec/xml
github.com/cloudevents/sdk-go/v2/protocol
github.com/cloudevents/sdk-go/v2/protocol/http
github.com/cloudevents/sdk-go/v2/types
# github.com/confluentinc/confluent-kafka-go/v2 v2.3.0
## explicit; go 1.17
github.com/confluentinc/confluent-kafka-go/v2/kafka
github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor
# github.com/coreos/go-semver v0.3.1
## explicit; go 1.8
github.com/coreos/go-semver/semver
@@ -240,10 +233,6 @@ github.com/mitchellh/copystructure
# github.com/mitchellh/reflectwalk v1.0.2
## explicit
github.com/mitchellh/reflectwalk
# github.com/moby/patternmatcher v0.6.0
## explicit; go 1.19
# github.com/moby/sys/sequential v0.5.0
## explicit; go 1.17
# github.com/mochi-mqtt/server/v2 v2.4.6
## explicit; go 1.21
github.com/mochi-mqtt/server/v2
@@ -1589,7 +1578,7 @@ open-cluster-management.io/api/utils/work/v1/workapplier
open-cluster-management.io/api/utils/work/v1/workvalidator
open-cluster-management.io/api/work/v1
open-cluster-management.io/api/work/v1alpha1
# open-cluster-management.io/sdk-go v0.13.1-0.20240422015316-b61db1fa9e71
# open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090
## explicit; go 1.21
open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1
open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1
@@ -1602,7 +1591,6 @@ open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types

View File

@@ -129,6 +129,8 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error {
return err
}
klog.V(4).Infof("Sent event: %v\n%s", ctx, evt)
// make sure the current client is the newest
c.RLock()
defer c.RUnlock()
@@ -137,7 +139,6 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error {
return fmt.Errorf("the cloudevents client is not ready")
}
klog.V(4).Infof("Sending event: %v\n%s", sendingCtx, evt)
if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) {
return fmt.Errorf("failed to send event %s, %v", evt, result)
}
@@ -166,7 +167,6 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) {
if cloudEventsClient != nil {
go func() {
if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) {
klog.V(4).Infof("Received event: %s", evt)
receive(receiverCtx, evt)
}); err != nil {
runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err))

View File

@@ -1,97 +0,0 @@
package kafka
import (
"context"
"fmt"
"strings"
cloudevents "github.com/cloudevents/sdk-go/v2"
cloudeventscontext "github.com/cloudevents/sdk-go/v2/context"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
)
type kafkaAgentOptions struct {
configMap *kafka.ConfigMap
clusterName string
agentID string
errorChan chan error
}
func NewAgentOptions(configMap *kafka.ConfigMap, clusterName, agentID string) *options.CloudEventsAgentOptions {
kafkaAgentOptions := &kafkaAgentOptions{
configMap: configMap,
clusterName: clusterName,
agentID: agentID,
errorChan: make(chan error),
}
groupID, err := configMap.Get("group.id", "")
if groupID == "" || err != nil {
_ = configMap.SetKey("group.id", agentID)
}
return &options.CloudEventsAgentOptions{
CloudEventsOptions: kafkaAgentOptions,
AgentID: agentID,
ClusterName: clusterName,
}
}
// encode the original source and cluster name into the message key
func (o *kafkaAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) {
eventType, err := types.ParseCloudEventsType(evtCtx.GetType())
if err != nil {
return nil, err
}
// agent publishes event to status topic to send the resource status from a specified cluster
originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource)
if err != nil {
return nil, err
}
if eventType.Action == types.ResyncRequestAction && originalSource == types.SourceAll {
// TODO support multiple sources, agent may need a source list instead of the broadcast
topic := strings.Replace(agentBroadcastTopic, "*", o.clusterName, 1)
return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), o.clusterName), nil
}
topic := strings.Replace(agentEventsTopic, "*", fmt.Sprintf("%s", originalSource), 1)
topic = strings.Replace(topic, "*", o.clusterName, 1)
messageKey := fmt.Sprintf("%s@%s", originalSource, o.clusterName)
return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), messageKey), nil
}
func (o *kafkaAgentOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) {
protocol, err := confluent.New(confluent.WithConfigMap(o.configMap),
confluent.WithReceiverTopics([]string{
fmt.Sprintf("^%s", replaceLast(sourceEventsTopic, "*", o.clusterName)),
fmt.Sprintf("^%s", sourceBroadcastTopic),
}),
confluent.WithSenderTopic("agentevents"),
confluent.WithErrorHandler(func(ctx context.Context, err kafka.Error) {
o.errorChan <- err
}))
if err != nil {
return nil, err
}
producerEvents, _ := protocol.Events()
handleProduceEvents(producerEvents, o.errorChan)
return protocol, nil
}
func (o *kafkaAgentOptions) ErrorChan() <-chan error {
return o.errorChan
}
func replaceLast(str, old, new string) string {
last := strings.LastIndex(str, old)
if last == -1 {
return str
}
return str[:last] + new + str[last+len(old):]
}
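For reference, the topic and message-key derivation performed by WithContext above works out as follows for an illustrative source and cluster; this standalone sketch only mirrors the string handling.

package main

import (
	"fmt"
	"strings"
)

func main() {
	const agentEventsTopic = "agentevents.*.*" // first * = source, second * = cluster

	source, cluster := "source1", "cluster1" // illustrative names

	topic := strings.Replace(agentEventsTopic, "*", source, 1)
	topic = strings.Replace(topic, "*", cluster, 1)
	key := fmt.Sprintf("%s@%s", source, cluster)

	fmt.Println(topic, key) // agentevents.source1.cluster1 source1@cluster1
}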

View File

@@ -1,122 +0,0 @@
package kafka
import (
"fmt"
"os"
"gopkg.in/yaml.v2"
"k8s.io/klog/v2"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)
const (
// sourceEventsTopic is a topic for sources to publish their resource create/update/delete events, the first
// asterisk is a wildcard for source, the second asterisk is a wildcard for cluster.
sourceEventsTopic = "sourceevents.*.*"
// agentEventsTopic is a topic for agents to publish their resource status update events, the first
// asterisk is a wildcard for source, the second asterisk is a wildcard for cluster.
agentEventsTopic = "agentevents.*.*"
// sourceBroadcastTopic is for a source to publish its events to all agents, the asterisk is a wildcard for source.
sourceBroadcastTopic = "sourcebroadcast.*"
// agentBroadcastTopic is for an agent to publish its events to all sources, the asterisk is a wildcard for cluster.
agentBroadcastTopic = "agentbroadcast.*"
)
type KafkaOptions struct {
// BootstrapServer is the host of the Kafka broker (hostname:port).
BootstrapServer string `json:"bootstrapServer" yaml:"bootstrapServer"`
// CAFile is the file path to a cert file for the Kafka broker certificate authority.
CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"`
// ClientCertFile is the file path to a client cert file for TLS.
ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"`
// ClientKeyFile is the file path to a client key file for TLS.
ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"`
// GroupID is a string that uniquely identifies the group of consumer processes to which this consumer belongs.
// Each application has a unique consumer GroupID. The default is the agentID for an agent and the sourceID for a source.
GroupID string `json:"groupID,omitempty" yaml:"groupID,omitempty"`
}
// Listen to all the events on the default events channel
// It's important to read these events otherwise the events channel will eventually fill up
// Detail: https://github.com/cloudevents/sdk-go/blob/main/protocol/kafka_confluent/v2/protocol.go#L90
func handleProduceEvents(producerEvents chan kafka.Event, errChan chan error) {
if producerEvents == nil {
return
}
go func() {
for e := range producerEvents {
switch ev := e.(type) {
case *kafka.Message:
// The message delivery report, indicating success or failure when sending message
if ev.TopicPartition.Error != nil {
klog.Errorf("Delivery failed: %v", ev.TopicPartition.Error)
}
case kafka.Error:
// Generic client instance-level errors, such as
// broker connection failures, authentication issues, etc.
errChan <- fmt.Errorf("client error %w", ev)
}
}
}()
}
// BuildKafkaOptionsFromFlags builds configs from a config filepath.
func BuildKafkaOptionsFromFlags(configPath string) (*kafka.ConfigMap, error) {
configData, err := os.ReadFile(configPath)
if err != nil {
return nil, err
}
// TODO: failed to unmarshal the data to kafka.ConfigMap directly.
// Further investigation is required to understand the reasons behind it.
config := &KafkaOptions{}
if err := yaml.Unmarshal(configData, config); err != nil {
return nil, err
}
if config.BootstrapServer == "" {
return nil, fmt.Errorf("bootstrapServer is required")
}
if (config.ClientCertFile == "" && config.ClientKeyFile != "") ||
(config.ClientCertFile != "" && config.ClientKeyFile == "") {
return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set")
}
if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" {
return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile")
}
configMap := &kafka.ConfigMap{
"bootstrap.servers": config.BootstrapServer,
"socket.keepalive.enable": true,
// silence spontaneous disconnection logs, kafka recovers by itself.
"log.connection.close": false,
// https://github.com/confluentinc/librdkafka/issues/4349
"ssl.endpoint.identification.algorithm": "none",
// the events channel server for both producer and consumer
"go.events.channel.size": 1000,
// producer
"acks": "1",
"retries": "0",
// consumer
"group.id": config.GroupID,
"enable.auto.commit": true,
"enable.auto.offset.store": false,
"queued.max.messages.kbytes": 32768, // 32 MB
"auto.offset.reset": "earliest",
}
if config.ClientCertFile != "" {
_ = configMap.SetKey("security.protocol", "ssl")
_ = configMap.SetKey("ssl.ca.location", config.CAFile)
_ = configMap.SetKey("ssl.certificate.location", config.ClientCertFile)
_ = configMap.SetKey("ssl.key.location", config.ClientKeyFile)
}
return configMap, nil
}
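BuildKafkaOptionsFromFlags expects a YAML file matching the KafkaOptions struct above. A hedged sketch of driving it; the broker address, file paths and group ID are illustrative, and the import path is the sdk-go options/kafka package listed in vendor/modules.txt above.

import (
	"fmt"
	"os"
	"path/filepath"

	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka"
)

func loadKafkaConfig() error {
	cfg := []byte(`bootstrapServer: broker.example.com:9092
caFile: /certs/ca.crt
clientCertFile: /certs/client.crt
clientKeyFile: /certs/client.key
groupID: my-consumer-group
`)
	path := filepath.Join(os.TempDir(), "kafka-config.yaml")
	if err := os.WriteFile(path, cfg, 0o600); err != nil {
		return err
	}

	configMap, err := kafka.BuildKafkaOptionsFromFlags(path)
	if err != nil {
		return err
	}
	server, _ := configMap.Get("bootstrap.servers", "")
	fmt.Println(server) // broker.example.com:9092
	return nil
}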

View File

@@ -1,87 +0,0 @@
package kafka
import (
"context"
"fmt"
"strings"
cloudevents "github.com/cloudevents/sdk-go/v2"
cloudeventscontext "github.com/cloudevents/sdk-go/v2/context"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
)
type kafkaSourceOptions struct {
configMap *kafka.ConfigMap
sourceID string
errorChan chan error
}
func NewSourceOptions(configMap *kafka.ConfigMap, sourceID string) *options.CloudEventsSourceOptions {
sourceOptions := &kafkaSourceOptions{
configMap: configMap,
sourceID: sourceID,
errorChan: make(chan error),
}
groupID, err := configMap.Get("group.id", "")
if groupID == "" || err != nil {
_ = configMap.SetKey("group.id", sourceID)
}
return &options.CloudEventsSourceOptions{
CloudEventsOptions: sourceOptions,
SourceID: sourceID,
}
}
func (o *kafkaSourceOptions) WithContext(ctx context.Context,
evtCtx cloudevents.EventContext,
) (context.Context, error) {
eventType, err := types.ParseCloudEventsType(evtCtx.GetType())
if err != nil {
return nil, err
}
clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName)
if err != nil {
return nil, err
}
if eventType.Action == types.ResyncRequestAction && clusterName == types.ClusterAll {
// source request to get resources status from all agents
topic := strings.Replace(sourceBroadcastTopic, "*", o.sourceID, 1)
return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), o.sourceID), nil
}
// source publishes event to source topic to send the resource spec to a specified cluster
messageKey := fmt.Sprintf("%s@%s", o.sourceID, clusterName)
topic := strings.Replace(sourceEventsTopic, "*", o.sourceID, 1)
topic = strings.Replace(topic, "*", fmt.Sprintf("%s", clusterName), 1)
return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), messageKey), nil
}
func (o *kafkaSourceOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) {
protocol, err := confluent.New(confluent.WithConfigMap(o.configMap),
confluent.WithReceiverTopics([]string{
fmt.Sprintf("^%s", strings.Replace(agentEventsTopic, "*", o.sourceID, 1)),
fmt.Sprintf("^%s", agentBroadcastTopic),
}),
confluent.WithSenderTopic("sourceevents"),
confluent.WithErrorHandler(func(ctx context.Context, err kafka.Error) {
o.errorChan <- err
}))
if err != nil {
return nil, err
}
producerEvents, _ := protocol.Events()
handleProduceEvents(producerEvents, o.errorChan)
return protocol, nil
}
func (o *kafkaSourceOptions) ErrorChan() <-chan error {
return o.errorChan
}
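A short sketch of constructing the source options above; note that group.id defaults to the source ID when the ConfigMap does not set one. The broker address and source ID are illustrative.

import (
	"fmt"

	confluentkafka "github.com/confluentinc/confluent-kafka-go/v2/kafka"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka"
)

func newSourceOptions() {
	configMap := &confluentkafka.ConfigMap{"bootstrap.servers": "broker.example.com:9092"}
	opts := kafka.NewSourceOptions(configMap, "source1")

	groupID, _ := configMap.Get("group.id", "")
	fmt.Println(opts.SourceID, groupID) // source1 source1
}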

View File

@@ -186,7 +186,7 @@ func ParseCloudEventsType(cloudEventsType string) (*CloudEventsType, error) {
types := strings.Split(cloudEventsType, ".")
length := len(types)
if length < 5 {
return nil, fmt.Errorf("unsupported cloudevents type format: %s", cloudEventsType)
return nil, fmt.Errorf("unsupported cloudevents type format")
}
subResource := EventSubResource(types[length-2])

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"time"
confluentkafka "github.com/confluentinc/confluent-kafka-go/v2/kafka"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
@@ -17,7 +16,6 @@ import (
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
agentclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client"
@@ -73,7 +71,6 @@ type ClientHolderBuilder struct {
// - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig
// - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT
// - GRPCOptions (*grpc.GRPCOptions): builds a manifestwork client based on cloudevents with GRPC
// - KafkaOptions (*kafka.KafkaOptions): builds a manifestwork client based on cloudevents with Kafka
//
// TODO using a specified config instead of any
func NewClientHolderBuilder(config any) *ClientHolderBuilder {
@@ -110,8 +107,7 @@ func (b *ClientHolderBuilder) WithCodecs(codecs ...generic.Codec[*workv1.Manifes
// WithInformerConfig set the ManifestWorkInformer configs. If the resync time is not set, the default time (10 minutes)
// will be used when building the ManifestWorkInformer.
func (b *ClientHolderBuilder) WithInformerConfig(
resyncTime time.Duration, options ...workinformers.SharedInformerOption,
) *ClientHolderBuilder {
resyncTime time.Duration, options ...workinformers.SharedInformerOption) *ClientHolderBuilder {
b.informerResyncTime = resyncTime
b.informerOptions = options
return b
@@ -126,8 +122,6 @@ func (b *ClientHolderBuilder) NewSourceClientHolder(ctx context.Context) (*Clien
return b.newSourceClients(ctx, mqtt.NewSourceOptions(config, b.clientID, b.sourceID))
case *grpc.GRPCOptions:
return b.newSourceClients(ctx, grpc.NewSourceOptions(config, b.sourceID))
case *confluentkafka.ConfigMap:
return b.newSourceClients(ctx, kafka.NewSourceOptions(config, b.sourceID))
default:
return nil, fmt.Errorf("unsupported client configuration type %T", config)
}
@@ -142,8 +136,6 @@ func (b *ClientHolderBuilder) NewAgentClientHolder(ctx context.Context) (*Client
return b.newAgentClients(ctx, mqtt.NewAgentOptions(config, b.clusterName, b.clientID))
case *grpc.GRPCOptions:
return b.newAgentClients(ctx, grpc.NewAgentOptions(config, b.clusterName, b.clientID))
case *confluentkafka.ConfigMap:
return b.newAgentClients(ctx, kafka.NewAgentOptions(config, b.clusterName, b.clientID))
default:
return nil, fmt.Errorf("unsupported client configuration type %T", config)
}

View File

@@ -6,15 +6,13 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt"
)
const (
ConfigTypeKube = "kube"
ConfigTypeMQTT = "mqtt"
ConfigTypeGRPC = "grpc"
ConfigTypeKafka = "kafka"
ConfigTypeKube = "kube"
ConfigTypeMQTT = "mqtt"
ConfigTypeGRPC = "grpc"
)
// ConfigLoader loads a configuration object with a configuration file.
@@ -31,7 +29,6 @@ type ConfigLoader struct {
// - kube
// - mqtt
// - grpc
// - kafka
func NewConfigLoader(configType, configPath string) *ConfigLoader {
return &ConfigLoader{
configType: configType,
@@ -78,21 +75,6 @@ func (l *ConfigLoader) LoadConfig() (string, any, error) {
}
return grpcOptions.URL, grpcOptions, nil
case ConfigTypeKafka:
configMap, err := kafka.BuildKafkaOptionsFromFlags(l.configPath)
if err != nil {
return "", nil, err
}
val, err := configMap.Get("bootstrap.servers", "")
if err != nil {
return "", nil, err
}
server, ok := val.(string)
if !ok {
return "", nil, fmt.Errorf("failed to get kafka bootstrap.servers from configMap")
}
return server, configMap, nil
}
return "", nil, fmt.Errorf("unsupported config type %s", l.configType)