mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-02-14 18:09:57 +00:00
update sdk-go (#1257)
Some checks failed
Post / coverage (push) Failing after 38m23s
Post / images (amd64, addon-manager) (push) Failing after 7m53s
Post / images (amd64, placement) (push) Failing after 6m57s
Post / images (amd64, registration) (push) Failing after 7m7s
Post / images (amd64, registration-operator) (push) Failing after 7m1s
Post / images (amd64, work) (push) Failing after 7m8s
Post / images (arm64, addon-manager) (push) Failing after 7m10s
Post / images (arm64, placement) (push) Failing after 7m11s
Post / images (arm64, registration) (push) Failing after 6m58s
Post / images (arm64, registration-operator) (push) Failing after 7m17s
Post / images (arm64, work) (push) Failing after 7m18s
Post / image manifest (addon-manager) (push) Has been skipped
Post / image manifest (placement) (push) Has been skipped
Post / image manifest (registration) (push) Has been skipped
Post / image manifest (registration-operator) (push) Has been skipped
Post / image manifest (work) (push) Has been skipped
Post / trigger clusteradm e2e (push) Has been skipped
Scorecard supply-chain security / Scorecard analysis (push) Failing after 1m15s
Close stale issues and PRs / stale (push) Successful in 41s
Signed-off-by: Wei Liu <liuweixa@redhat.com>
20  .github/workflows/cloudevents-integration.yml  vendored
@@ -19,8 +19,22 @@ permissions:
contents: read

jobs:
integration:
name: cloudevents-integration
# TODO enable this after mqtt integration is stable
# mqtt-work-integration:
# name: mqtt-work-integration
# runs-on: ubuntu-latest
# steps:
# - name: checkout code
# uses: actions/checkout@v5
# - name: install Go
# uses: actions/setup-go@v6
# with:
# go-version: ${{ env.GO_VERSION }}
# - name: integration
# run: make test-cloudevents-work-mqtt-integration

grpc-work-integration:
name: grpc-work-integration
runs-on: ubuntu-latest
steps:
- name: checkout code
@@ -30,4 +44,4 @@ jobs:
with:
go-version: ${{ env.GO_VERSION }}
- name: integration
run: make test-cloudevents-integration
run: make test-cloudevents-work-grpc-integration
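The net effect of this hunk pair: the generic `integration` job becomes `grpc-work-integration`, its make target changes from `test-cloudevents-integration` to `test-cloudevents-work-grpc-integration`, and an equivalent MQTT job is parked as comments until, per the TODO, the MQTT integration is stable.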
10  go.mod
@@ -41,7 +41,7 @@ require (
k8s.io/utils v0.0.0-20241210054802-24370beab758
open-cluster-management.io/addon-framework v1.1.0
open-cluster-management.io/api v1.1.1-0.20251112045944-3e1bb92b69e3
open-cluster-management.io/sdk-go v1.1.0
open-cluster-management.io/sdk-go v1.1.1-0.20251117075350-a9794783fa67
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03
sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848
sigs.k8s.io/controller-runtime v0.21.0
@@ -58,8 +58,6 @@ require (
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.18.24 // indirect
@@ -77,14 +75,10 @@ require (
github.com/bwmarrin/snowflake v0.3.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 // indirect
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20250922144431-372892d7c84d // indirect
github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/docker/docker v28.4.0+incompatible // indirect
github.com/eclipse/paho.golang v0.23.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
@@ -121,8 +115,6 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
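The only direct requirement that moves is the sdk-go bump from v1.1.0 to the 2025-11-17 pseudo-version; the shrinking hunk counts (14 lines down to 10, and so on) come from indirect dependencies such as the cloudevents kafka_confluent protocol falling out of the module graph, which the vendored-file deletions at the end of this diff confirm. A bump like this is typically produced with `go get open-cluster-management.io/sdk-go@a9794783fa67` followed by `go mod tidy` and `go mod vendor`.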
56  go.sum
@@ -7,8 +7,6 @@ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
@@ -18,10 +16,6 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
@@ -77,25 +71,11 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 h1:3/pjormyqkSjF2GHQehTELZ9oqlER4GrJZiVUIk8Fy8=
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991/go.mod h1:xiar5+gk13WqyAUQ/cpcxcjD1IhLe/PeilSfCdPcfMU=
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20250922144431-372892d7c84d h1:lPjd5+7dPQgG2LJCTusnQ2X7e7jJbbjGvMq2nZCktlc=
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20250922144431-372892d7c84d/go.mod h1:uGZIO7gmadwSmzHXpemf29e7wdvsceJOUZF2zVtT4Mw=
github.com/cloudevents/sdk-go/v2 v2.16.2 h1:ZYDFrYke4FD+jM8TZTJJO6JhKHzOQl2oqpFK1D+NnQM=
github.com/cloudevents/sdk-go/v2 v2.16.2/go.mod h1:laOcGImm4nVJEU+PHnUrKL56CKmRL65RlQF0kRmW/kg=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 h1:icCHutJouWlQREayFwCc7lxDAhws08td+W3/gdqgZts=
github.com/confluentinc/confluent-kafka-go/v2 v2.3.0/go.mod h1:/VTy8iEpe6mD9pkCH5BhijlUl8ulUXymKv1Qig5Rgb8=
github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -108,16 +88,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eclipse/paho.golang v0.23.0 h1:KHgl2wz6EJo7cMBmkuhpt7C576vP+kpPv7jjvSyR6Mk=
@@ -247,28 +219,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/mochi-mqtt/server/v2 v2.7.9 h1:y0g4vrSLAag7T07l2oCzOa/+nKVLoazKEWAArwqBNYI=
github.com/mochi-mqtt/server/v2 v2.7.9/go.mod h1:lZD3j35AVNqJL5cezlnSkuG05c0FCHSsfAKSPBOSbqc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -276,8 +232,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@@ -288,10 +242,6 @@ github.com/onsi/ginkgo/v2 v2.25.1 h1:Fwp6crTREKM+oA6Cz4MsO8RhKQzs2/gOIVOUscMAfZY
github.com/onsi/ginkgo/v2 v2.25.1/go.mod h1:ppTWQ1dh9KM/F1XgpeRqelR+zHVwV81DGRSDnFxK7Sk=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/openshift/api v0.0.0-20250710004639-926605d3338b h1:A8OY6adT2aZNp7tsGsilHuQ3RqhzrFx5dzGr/UwXfJg=
github.com/openshift/api v0.0.0-20250710004639-926605d3338b/go.mod h1:SPLf21TYPipzCO67BURkCfK6dcIIxx0oNRVWaOyRcXM=
github.com/openshift/build-machinery-go v0.0.0-20250602125535-1b6d00b8c37c h1:gJvhduWIrpzoUTwrJjjeul+hGETKkhRhEZosBg/X3Hg=
@@ -363,8 +313,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/testcontainers/testcontainers-go v0.14.0 h1:h0D5GaYG9mhOWr2qHdEKDXpkce/VlvaYOCzTRi6UBi8=
github.com/testcontainers/testcontainers-go v0.14.0/go.mod h1:hSRGJ1G8Q5Bw2gXgPulJOLlEBaYJHeBSOkQM5JLG+JQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@@ -569,8 +517,8 @@ open-cluster-management.io/addon-framework v1.1.0 h1:GoPbg5Q9KEI+Vvgs9PUs2IjIoU/
open-cluster-management.io/addon-framework v1.1.0/go.mod h1:KPdLM+CfUKgwVuVE9Tyu2nOuD6LgDmx94HOCnJwLIdo=
open-cluster-management.io/api v1.1.1-0.20251112045944-3e1bb92b69e3 h1:pJl/jwiUBO0D4PrL+G6JASKC8PDpPoxItLa6cTcj8TM=
open-cluster-management.io/api v1.1.1-0.20251112045944-3e1bb92b69e3/go.mod h1:lEc5Wkc9ON5ym/qAtIqNgrE7NW7IEOCOC611iQMlnKM=
open-cluster-management.io/sdk-go v1.1.0 h1:vYGkoihIVetyVT4ICO7HjoUHsnh6Gf+Da4ZSmWCamhc=
open-cluster-management.io/sdk-go v1.1.0/go.mod h1:DH4EMNDMiousmaj+noHYQxm48T+dbogiAfALhDnrjMg=
open-cluster-management.io/sdk-go v1.1.1-0.20251117075350-a9794783fa67 h1:G4w5+FI1VpgLLJcijm4lGwSMXev0377iJ3Jlx62VKCY=
open-cluster-management.io/sdk-go v1.1.1-0.20251117075350-a9794783fa67/go.mod h1:oQzZFphlr1hfzRGrMa24OYCFg9ZmMTJov3mb8OLVOaM=
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03 h1:1ShFiMjGQOR/8jTBkmZrk1gORxnvMwm1nOy2/DbHg4U=
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03/go.mod h1:F1pT4mK53U6F16/zuaPSYpBaR7x5Kjym6aKJJC0/DHU=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
@@ -31,7 +31,7 @@ import (
cloudeventsoptions "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/options"
cloudeventsstore "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store"
"open-cluster-management.io/sdk-go/pkg/cloudevents/constants"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc"

@@ -84,7 +84,6 @@ func (d *GRPCDriver) BuildClients(ctx context.Context, secretOption register.Sec
clusterClient := clusterClientHolder.ClusterInterface()
clusterInformers := clusterinformers.NewSharedInformerFactory(
clusterClient, 10*time.Minute).Cluster().V1().ManagedClusters()
clusterWatchStore.SetInformer(clusterInformers.Informer())

csrClientHolder, err := cloudeventscsr.NewAgentClientHolder(ctx,
cloudeventsoptions.NewGenericClientOptions(
@@ -152,7 +151,6 @@ func (d *GRPCDriver) BuildClients(ctx context.Context, secretOption register.Sec
addonInformer := addoninformers.NewSharedInformerFactoryWithOptions(
addonClient, 10*time.Minute, addoninformers.WithNamespace(secretOption.ClusterName)).
Addon().V1alpha1().ManagedClusterAddOns()
addonWatchStore.SetInformer(addonInformer.Informer())

clients := &register.Clients{
ClusterClient: clusterClient,
@@ -237,7 +235,7 @@ func (d *GRPCDriver) loadConfig(secretOption register.SecretOption, bootstrapped
var config any
var configFile string
if bootstrapped {
_, config, err = generic.NewConfigLoader(constants.ConfigTypeGRPC, d.opt.BootstrapConfigFile).LoadConfig()
_, config, err = builder.NewConfigLoader(constants.ConfigTypeGRPC, d.opt.BootstrapConfigFile).LoadConfig()
if err != nil {
return nil, nil, fmt.Errorf(
"failed to load hub bootstrap registration config from file %q: %w",
@@ -246,7 +244,7 @@ func (d *GRPCDriver) loadConfig(secretOption register.SecretOption, bootstrapped

configFile = d.opt.BootstrapConfigFile
} else {
_, config, err = generic.NewConfigLoader(constants.ConfigTypeGRPC, d.opt.ConfigFile).LoadConfig()
_, config, err = builder.NewConfigLoader(constants.ConfigTypeGRPC, d.opt.ConfigFile).LoadConfig()
if err != nil {
return nil, nil, fmt.Errorf(
"failed to load hub registration config from file %q: %w",
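Both the bootstrap and the non-bootstrap paths now take the loader from sdk-go's generic/options/builder package instead of generic; nothing else about the call changes. A minimal sketch of the new call shape, inferred from the call sites in this diff rather than from sdk-go documentation (the helper name here is illustrative):

import (
	"fmt"

	"open-cluster-management.io/sdk-go/pkg/cloudevents/constants"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder"
)

// loadGRPCConfig mirrors GRPCDriver.loadConfig after this change; only the
// package hosting NewConfigLoader differs from the old code.
func loadGRPCConfig(configFile string) (any, error) {
	// The first return value is the server host, which the registration
	// driver discards here.
	_, config, err := builder.NewConfigLoader(constants.ConfigTypeGRPC, configFile).LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to load hub registration config from file %q: %w", configFile, err)
	}
	return config, nil
}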
@@ -56,17 +56,17 @@ func (o *GRPCServerOptions) Run(ctx context.Context, controllerContext *controll

// initlize grpc broker and register services
grpcEventServer := cloudeventsgrpc.NewGRPCBroker()
grpcEventServer.RegisterService(clusterce.ManagedClusterEventDataType,
grpcEventServer.RegisterService(ctx, clusterce.ManagedClusterEventDataType,
cluster.NewClusterService(clients.ClusterClient, clients.ClusterInformers.Cluster().V1().ManagedClusters()))
grpcEventServer.RegisterService(csrce.CSREventDataType,
grpcEventServer.RegisterService(ctx, csrce.CSREventDataType,
csr.NewCSRService(clients.KubeClient, clients.KubeInformers.Certificates().V1().CertificateSigningRequests()))
grpcEventServer.RegisterService(addonce.ManagedClusterAddOnEventDataType,
grpcEventServer.RegisterService(ctx, addonce.ManagedClusterAddOnEventDataType,
addon.NewAddonService(clients.AddOnClient, clients.AddOnInformers.Addon().V1alpha1().ManagedClusterAddOns()))
grpcEventServer.RegisterService(eventce.EventEventDataType,
grpcEventServer.RegisterService(ctx, eventce.EventEventDataType,
event.NewEventService(clients.KubeClient))
grpcEventServer.RegisterService(leasece.LeaseEventDataType,
grpcEventServer.RegisterService(ctx, leasece.LeaseEventDataType,
lease.NewLeaseService(clients.KubeClient, clients.KubeInformers.Coordination().V1().Leases()))
grpcEventServer.RegisterService(payload.ManifestBundleEventDataType,
grpcEventServer.RegisterService(ctx, payload.ManifestBundleEventDataType,
work.NewWorkService(clients.WorkClient, clients.WorkInformers.Work().V1().ManifestWorks()))

// initlize and run grpc server
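Each RegisterService call here gains the server context as a new first argument; the matching change on the service side is the RegisterHandler signature update in the hunks below, which suggests handler registration is now tied to the broker's lifecycle.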
@@ -90,7 +90,7 @@ func (s *AddonService) HandleStatusUpdate(ctx context.Context, evt *cloudevents.
}
}

func (s *AddonService) RegisterHandler(handler server.EventHandler) {
func (s *AddonService) RegisterHandler(ctx context.Context, handler server.EventHandler) {
if _, err := s.addonInformer.Informer().AddEventHandler(s.EventHandlerFuncs(handler)); err != nil {
klog.Errorf("failed to register addon informer event handler, %v", err)
}

@@ -93,7 +93,7 @@ func (c *ClusterService) HandleStatusUpdate(ctx context.Context, evt *cloudevent
}
}

func (c *ClusterService) RegisterHandler(handler server.EventHandler) {
func (c *ClusterService) RegisterHandler(ctx context.Context, handler server.EventHandler) {
if _, err := c.clusterInformer.Informer().AddEventHandler(c.EventHandlerFuncs(handler)); err != nil {
klog.Errorf("failed to register cluster informer event handler, %v", err)
}

@@ -98,7 +98,7 @@ func (c *CSRService) HandleStatusUpdate(ctx context.Context, evt *cloudevents.Ev
}
}

func (c *CSRService) RegisterHandler(handler server.EventHandler) {
func (c *CSRService) RegisterHandler(ctx context.Context, handler server.EventHandler) {
if _, err := c.csrInformer.Informer().AddEventHandler(c.EventHandlerFuncs(handler)); err != nil {
klog.Errorf("failed to register csr informer event handler, %v", err)
}

@@ -73,6 +73,6 @@ func (e *EventService) HandleStatusUpdate(ctx context.Context, evt *cloudevents.
}
}

func (e *EventService) RegisterHandler(handler server.EventHandler) {
func (e *EventService) RegisterHandler(ctx context.Context, handler server.EventHandler) {
// do nothing
}

@@ -89,7 +89,7 @@ func (l *LeaseService) HandleStatusUpdate(ctx context.Context, evt *cloudevents.
}
}

func (l *LeaseService) RegisterHandler(handler server.EventHandler) {
func (l *LeaseService) RegisterHandler(ctx context.Context, handler server.EventHandler) {
if _, err := l.informer.Informer().AddEventHandler(l.EventHandlerFuncs(handler)); err != nil {
klog.Errorf("failed to register lease informer event handler, %v", err)
}
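All six services implement the same new method shape, which implies the service interface in sdk-go changed with it. A hedged sketch of what the implementing side now satisfies; the interface name and the other two method signatures are inferred from the hunk headers in this diff, not copied from sdk-go:

import (
	"context"

	cloudevents "github.com/cloudevents/sdk-go/v2"
)

// Service is an inferred shape, for orientation only; see sdk-go's server
// package for the authoritative definition.
type Service interface {
	Get(ctx context.Context, resourceID string) (*cloudevents.Event, error)
	HandleStatusUpdate(ctx context.Context, evt *cloudevents.Event) error
	RegisterHandler(ctx context.Context, handler EventHandler) // EventHandler stands in for sdk-go's server.EventHandler
}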
@@ -6,6 +6,7 @@ import (

cloudevents "github.com/cloudevents/sdk-go/v2"
cloudeventstypes "github.com/cloudevents/sdk-go/v2/types"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
@@ -59,8 +60,6 @@ func (w *WorkService) Get(ctx context.Context, resourceID string) (*cloudevents.
}

work = work.DeepCopy()
// use the work generation as the work cloudevent resource version
work.ResourceVersion = fmt.Sprintf("%d", work.Generation)
return w.codec.Encode(services.CloudEventsSourceKube, types.CloudEventsType{CloudEventsDataType: payload.ManifestBundleEventDataType}, work)
}

@@ -146,7 +145,7 @@ func (w *WorkService) HandleStatusUpdate(ctx context.Context, evt *cloudevents.E
}
}

func (w *WorkService) RegisterHandler(handler server.EventHandler) {
func (w *WorkService) RegisterHandler(ctx context.Context, handler server.EventHandler) {
if _, err := w.workInformer.Informer().AddEventHandler(w.EventHandlerFuncs(handler)); err != nil {
klog.Errorf("failed to register work informer event handler, %v", err)
}
@@ -166,12 +165,27 @@ func (w *WorkService) EventHandlerFuncs(handler server.EventHandler) *cache.Reso
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
accessor, err := meta.Accessor(newObj)
oldAccessor, err := meta.Accessor(oldObj)
if err != nil {
klog.Errorf("failed to get accessor for work %v", err)
return
}
id := accessor.GetNamespace() + "/" + accessor.GetName()

newAccessor, err := meta.Accessor(newObj)
if err != nil {
klog.Errorf("failed to get accessor for work %v", err)
return
}

// the manifestwork is not changed and is not deleting
if cmp.Equal(oldAccessor.GetLabels(), newAccessor.GetLabels()) &&
cmp.Equal(oldAccessor.GetAnnotations(), newAccessor.GetAnnotations()) &&
oldAccessor.GetGeneration() == newAccessor.GetGeneration() &&
newAccessor.GetDeletionTimestamp().IsZero() {
return
}

id := newAccessor.GetNamespace() + "/" + newAccessor.GetName()
if err := handler.OnUpdate(context.Background(), payload.ManifestBundleEventDataType, id); err != nil {
klog.Error(err)
}
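This UpdateFunc rewrite is the behavioral heart of the change: the old handler forwarded an update event on every informer callback, including periodic resyncs, while the new one compares the old and new objects and returns early when nothing meaningful changed (Kubernetes only bumps metadata.generation on spec changes, so status-only churn no longer fans out). The Get path also stops overwriting the work's ResourceVersion with its generation before encoding. The filter reduces to the following predicate, restated as a standalone helper for readability; the helper name is illustrative, the real logic is inline above:

import (
	"github.com/google/go-cmp/cmp"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// shouldNotify reports whether an update is worth forwarding: labels,
// annotations, or generation changed, or the object is being deleted.
func shouldNotify(oldObj, newObj metav1.Object) bool {
	return !cmp.Equal(oldObj.GetLabels(), newObj.GetLabels()) ||
		!cmp.Equal(oldObj.GetAnnotations(), newObj.GetAnnotations()) ||
		oldObj.GetGeneration() != newObj.GetGeneration() ||
		!newObj.GetDeletionTimestamp().IsZero()
}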
@@ -343,14 +343,61 @@ func TestEventHandlerFuncs(t *testing.T) {
eventHandlerFuncs := service.EventHandlerFuncs(handler)

work := &workv1.ManifestWork{
ObjectMeta: metav1.ObjectMeta{Name: "test-work", Namespace: "test-namespace"},
ObjectMeta: metav1.ObjectMeta{
Name: "test-work",
Namespace: "test-namespace",
Generation: 1,
},
}
eventHandlerFuncs.AddFunc(work)
if !handler.onCreateCalled {
t.Errorf("onCreate not called")
}

eventHandlerFuncs.UpdateFunc(nil, work)
eventHandlerFuncs.UpdateFunc(work, &workv1.ManifestWork{
ObjectMeta: metav1.ObjectMeta{
Name: "test-work",
Namespace: "test-namespace",
Generation: 2,
},
})
if !handler.onUpdateCalled {
t.Errorf("onUpdate not called")
}

eventHandlerFuncs.UpdateFunc(work, &workv1.ManifestWork{
ObjectMeta: metav1.ObjectMeta{
Name: "test-work",
Namespace: "test-namespace",
Generation: 1,
Labels: map[string]string{"test": "test"},
},
})
if !handler.onUpdateCalled {
t.Errorf("onUpdate not called")
}

eventHandlerFuncs.UpdateFunc(work, &workv1.ManifestWork{
ObjectMeta: metav1.ObjectMeta{
Name: "test-work",
Namespace: "test-namespace",
Generation: 1,
Annotations: map[string]string{"test": "test"},
},
})
if !handler.onUpdateCalled {
t.Errorf("onUpdate not called")
}

time := metav1.Now()
eventHandlerFuncs.UpdateFunc(work, &workv1.ManifestWork{
ObjectMeta: metav1.ObjectMeta{
Name: "test-work",
Namespace: "test-namespace",
DeletionTimestamp: &time,
Generation: 1,
},
})
if !handler.onUpdateCalled {
t.Errorf("onUpdate not called")
}
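Together with the Generation: 1 seed on the initial object, the expanded test drives UpdateFunc through each branch of the new filter: a generation bump, a label change, an annotation change, and a set deletion timestamp, asserting that onUpdate fires in every case.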
@@ -18,7 +18,7 @@ import (
"open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work"
"open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/source/codec"
"open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/store"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder"

"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/work/hub/controllers/manifestworkgarbagecollection"
@@ -77,7 +77,7 @@ func (c *WorkHubManagerConfig) RunWorkHubManager(ctx context.Context, controller

watcherStore = store.NewSourceInformerWatcherStore(ctx)

_, config, err := generic.NewConfigLoader(c.workOptions.WorkDriver, c.workOptions.WorkDriverConfig).LoadConfig()
_, config, err := builder.NewConfigLoader(c.workOptions.WorkDriver, c.workOptions.WorkDriverConfig).LoadConfig()
if err != nil {
return err
}

@@ -23,7 +23,7 @@ import (
cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work"
"open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/agent/codec"
"open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/store"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder"

"open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
@@ -234,7 +234,7 @@ func (o *WorkAgentConfig) newWorkClientAndInformer(

watcherStore = store.NewAgentInformerWatcherStore()

serverHost, config, err := generic.NewConfigLoader(o.workOptions.WorkloadSourceDriver, o.workOptions.WorkloadSourceConfig).
serverHost, config, err := builder.NewConfigLoader(o.workOptions.WorkloadSourceDriver, o.workOptions.WorkloadSourceConfig).
LoadConfig()
if err != nil {
return "", nil, nil, err
@@ -262,10 +262,5 @@ func (o *WorkAgentConfig) newWorkClientAndInformer(
)
informer := factory.Work().V1().ManifestWorks()

// For cloudevents work client, we use the informer store as the client store
if watcherStore != nil {
watcherStore.SetInformer(informer.Informer())
}

return hubHost, workClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), informer, nil
}

@@ -86,15 +86,15 @@ var _ = ginkgo.Describe("Registration using GRPC", ginkgo.Ordered, ginkgo.Label(
go hook.Run(grpcServerCtx)

grpcEventServer := cloudeventsgrpc.NewGRPCBroker()
grpcEventServer.RegisterService(clusterce.ManagedClusterEventDataType,
grpcEventServer.RegisterService(grpcServerCtx, clusterce.ManagedClusterEventDataType,
cluster.NewClusterService(hook.ClusterClient, hook.ClusterInformers.Cluster().V1().ManagedClusters()))
grpcEventServer.RegisterService(csrce.CSREventDataType,
grpcEventServer.RegisterService(grpcServerCtx, csrce.CSREventDataType,
csr.NewCSRService(hook.KubeClient, hook.KubeInformers.Certificates().V1().CertificateSigningRequests()))
grpcEventServer.RegisterService(addonce.ManagedClusterAddOnEventDataType,
grpcEventServer.RegisterService(grpcServerCtx, addonce.ManagedClusterAddOnEventDataType,
addon.NewAddonService(hook.AddOnClient, hook.AddOnInformers.Addon().V1alpha1().ManagedClusterAddOns()))
grpcEventServer.RegisterService(eventce.EventEventDataType,
grpcEventServer.RegisterService(grpcServerCtx, eventce.EventEventDataType,
event.NewEventService(hook.KubeClient))
grpcEventServer.RegisterService(leasece.LeaseEventDataType,
grpcEventServer.RegisterService(grpcServerCtx, leasece.LeaseEventDataType,
lease.NewLeaseService(hook.KubeClient, hook.KubeInformers.Coordination().V1().Leases()))

authorizer := util.NewMockAuthorizer()

@@ -280,7 +280,7 @@ func startGRPCServer(ctx context.Context, temp string, cfg *rest.Config) (string
}()

grpcEventServer := cloudeventsgrpc.NewGRPCBroker()
grpcEventServer.RegisterService(payload.ManifestBundleEventDataType,
grpcEventServer.RegisterService(ctx, payload.ManifestBundleEventDataType,
serviceswork.NewWorkService(hook.WorkClient, hook.WorkInformers.Work().V1().ManifestWorks()))

authorizer := util.NewMockAuthorizer()
201  vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/LICENSE  generated vendored
@@ -1,201 +0,0 @@
[entire file deleted: the standard Apache License, Version 2.0 text]
156  vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/message.go  generated vendored
@@ -1,156 +0,0 @@
/*
Copyright 2023 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package kafka_confluent

import (
"bytes"
"context"
"strconv"
"strings"

"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/format"
"github.com/cloudevents/sdk-go/v2/binding/spec"
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

const (
prefix = "ce-"
contentTypeKey = "content-type"
)

const (
KafkaOffsetKey = "kafkaoffset"
KafkaPartitionKey = "kafkapartition"
KafkaTopicKey = "kafkatopic"
KafkaMessageKey = "kafkamessagekey"
)

var specs = spec.WithPrefix(prefix)

// Message represents a Kafka message.
// This message *can* be read several times safely
type Message struct {
internal *kafka.Message
properties map[string][]byte
format format.Format
version spec.Version
}

// Check if Message implements binding.Message
var (
_ binding.Message = (*Message)(nil)
_ binding.MessageMetadataReader = (*Message)(nil)
)

// NewMessage returns a binding.Message that holds the provided kafka.Message.
// The returned binding.Message *can* be read several times safely
// This function *doesn't* guarantee that the returned binding.Message is always a kafka_sarama.Message instance
func NewMessage(msg *kafka.Message) *Message {
if msg == nil {
panic("the kafka.Message shouldn't be nil")
}
if msg.TopicPartition.Topic == nil {
panic("the topic of kafka.Message shouldn't be nil")
}
if msg.TopicPartition.Partition < 0 || msg.TopicPartition.Offset < 0 {
panic("the partition or offset of the kafka.Message must be non-negative")
}

var contentType, contentVersion string
properties := make(map[string][]byte, len(msg.Headers)+3)
for _, header := range msg.Headers {
k := strings.ToLower(string(header.Key))
if k == strings.ToLower(contentTypeKey) {
contentType = string(header.Value)
}
if k == specs.PrefixedSpecVersionName() {
contentVersion = string(header.Value)
}
properties[k] = header.Value
}

// add the kafka message key, topic, partition and partition key to the properties
properties[prefix+KafkaOffsetKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Offset), 10))
properties[prefix+KafkaPartitionKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Partition), 10))
properties[prefix+KafkaTopicKey] = []byte(*msg.TopicPartition.Topic)
if msg.Key != nil {
properties[prefix+KafkaMessageKey] = msg.Key
}

message := &Message{
internal: msg,
properties: properties,
}
if ft := format.Lookup(contentType); ft != nil {
message.format = ft
} else if v := specs.Version(contentVersion); v != nil {
message.version = v
}

return message
}

func (m *Message) ReadEncoding() binding.Encoding {
if m.version != nil {
return binding.EncodingBinary
}
if m.format != nil {
return binding.EncodingStructured
}
return binding.EncodingUnknown
}

func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error {
if m.format != nil {
return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Value))
}
return binding.ErrNotStructured
}

func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error {
if m.version == nil {
return binding.ErrNotBinary
}

var err error
for k, v := range m.properties {
if strings.HasPrefix(k, prefix) {
attr := m.version.Attribute(k)
if attr != nil {
err = encoder.SetAttribute(attr, string(v))
} else {
err = encoder.SetExtension(strings.TrimPrefix(k, prefix), string(v))
}
} else if k == strings.ToLower(contentTypeKey) {
err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(v))
}
if err != nil {
return err
}
}

if m.internal.Value != nil {
err = encoder.SetData(bytes.NewBuffer(m.internal.Value))
}
return err
}

func (m *Message) Finish(error) error {
return nil
}

func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) {
attr := m.version.AttributeFromKind(k)
if attr == nil {
return nil, nil
}
return attr, m.properties[attr.PrefixedName()]
}

func (m *Message) GetExtension(name string) interface{} {
return m.properties[prefix+name]
}
151  vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/option.go  generated vendored
@@ -1,151 +0,0 @@
|
||||
/*
|
||||
Copyright 2023 The CloudEvents Authors
|
||||
SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
package kafka_confluent
|
||||
|
||||
import (
|
||||
"context"
|
	"errors"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// Option is the function signature required to be considered a kafka_confluent.Option.
type Option func(*Protocol) error

// WithConfigMap sets the kafka.ConfigMap used to initialize the Kafka client.
func WithConfigMap(config *kafka.ConfigMap) Option {
	return func(p *Protocol) error {
		if config == nil {
			return errors.New("the kafka.ConfigMap option must not be nil")
		}
		p.kafkaConfigMap = config
		return nil
	}
}

// WithSenderTopic sets the default topic for the kafka.Producer.
func WithSenderTopic(defaultTopic string) Option {
	return func(p *Protocol) error {
		if defaultTopic == "" {
			return errors.New("the producer topic option must not be empty")
		}
		p.producerDefaultTopic = defaultTopic
		return nil
	}
}

// WithReceiverTopics sets the topics for the kafka.Consumer.
func WithReceiverTopics(topics []string) Option {
	return func(p *Protocol) error {
		if topics == nil {
			return errors.New("the consumer topics option must not be nil")
		}
		p.consumerTopics = topics
		return nil
	}
}

// WithRebalanceCallBack sets the callback for rebalancing of the consumer group.
func WithRebalanceCallBack(rebalanceCb kafka.RebalanceCb) Option {
	return func(p *Protocol) error {
		if rebalanceCb == nil {
			return errors.New("the consumer group rebalance callback must not be nil")
		}
		p.consumerRebalanceCb = rebalanceCb
		return nil
	}
}

// WithPollTimeout sets the timeout, in milliseconds, of the consumer's polling for messages or events; Poll returns nil on timeout.
func WithPollTimeout(timeoutMs int) Option {
	return func(p *Protocol) error {
		p.consumerPollTimeout = timeoutMs
		return nil
	}
}

// WithSender sets a kafka.Producer instance to initialize the client directly.
func WithSender(producer *kafka.Producer) Option {
	return func(p *Protocol) error {
		if producer == nil {
			return errors.New("the producer option must not be nil")
		}
		p.producer = producer
		return nil
	}
}

// WithErrorHandler provides a func for handling the kafka.Error that the kafka.Consumer has polled.
func WithErrorHandler(handler func(ctx context.Context, err kafka.Error)) Option {
	return func(p *Protocol) error {
		p.consumerErrorHandler = handler
		return nil
	}
}

// WithReceiver sets a kafka.Consumer instance to initialize the client directly.
func WithReceiver(consumer *kafka.Consumer) Option {
	return func(p *Protocol) error {
		if consumer == nil {
			return errors.New("the consumer option must not be nil")
		}
		p.consumer = consumer
		return nil
	}
}

// Opaque key type used to store topicPartitionOffsets: assign them from ctx.
type topicPartitionOffsetsType struct{}

var offsetKey = topicPartitionOffsetsType{}

// WithTopicPartitionOffsets sets the positions from which the consumer starts consuming.
func WithTopicPartitionOffsets(ctx context.Context, topicPartitionOffsets []kafka.TopicPartition) context.Context {
	if len(topicPartitionOffsets) == 0 {
		panic("the topicPartitionOffsets cannot be empty")
	}
	for _, offset := range topicPartitionOffsets {
		if offset.Topic == nil || *(offset.Topic) == "" {
			panic("the kafka topic cannot be nil or empty")
		}
		if offset.Partition < 0 || offset.Offset < 0 {
			panic("the kafka partition/offset must be non-negative")
		}
	}
	return context.WithValue(ctx, offsetKey, topicPartitionOffsets)
}

// TopicPartitionOffsetsFrom looks in the given context and returns []kafka.TopicPartition, or nil if not set.
func TopicPartitionOffsetsFrom(ctx context.Context) []kafka.TopicPartition {
	c := ctx.Value(offsetKey)
	if c != nil {
		if s, ok := c.([]kafka.TopicPartition); ok {
			return s
		}
	}
	return nil
}

// Opaque key type used to store the message key.
type messageKeyType struct{}

var keyForMessageKey = messageKeyType{}

// WithMessageKey returns a new context carrying the given messageKey.
func WithMessageKey(ctx context.Context, messageKey string) context.Context {
	return context.WithValue(ctx, keyForMessageKey, messageKey)
}

// MessageKeyFrom looks in the given context and returns `messageKey` as a string if found and valid, otherwise "".
func MessageKeyFrom(ctx context.Context) string {
	c := ctx.Value(keyForMessageKey)
	if c != nil {
		if s, ok := c.(string); ok {
			return s
		}
	}
	return ""
}
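The options above are consumed by the Protocol constructor defined in protocol.go below. As a minimal usage sketch (not part of the vendored sources; the broker address, group id, and topic names are placeholders):

package main

import (
	"context"
	"log"

	kafka_confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2"
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Build the protocol from a single ConfigMap; New() creates the
	// producer and consumer on demand from the topics that are set.
	p, err := kafka_confluent.New(
		kafka_confluent.WithConfigMap(&kafka.ConfigMap{
			"bootstrap.servers": "localhost:9092", // placeholder broker
			"group.id":          "demo-group",     // placeholder group id
		}),
		kafka_confluent.WithSenderTopic("demo-topic"),              // placeholder topic
		kafka_confluent.WithReceiverTopics([]string{"demo-topic"}), // placeholder topic
		kafka_confluent.WithPollTimeout(100),                       // poll every 100 ms
	)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close(context.Background())

	// Partition the outgoing stream by a per-message key carried in the context.
	ctx := kafka_confluent.WithMessageKey(context.Background(), "order-42")
	_ = ctx // pass this ctx to the CloudEvents client's Send (see protocol.go below)
}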
247
vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/protocol.go
generated
vendored
@@ -1,247 +0,0 @@
/*
 Copyright 2023 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package kafka_confluent

import (
	"context"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/protocol"
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"

	cecontext "github.com/cloudevents/sdk-go/v2/context"
)

var (
	_ protocol.Sender   = (*Protocol)(nil)
	_ protocol.Opener   = (*Protocol)(nil)
	_ protocol.Receiver = (*Protocol)(nil)
	_ protocol.Closer   = (*Protocol)(nil)
)

type Protocol struct {
	kafkaConfigMap *kafka.ConfigMap

	consumer             *kafka.Consumer
	consumerTopics       []string
	consumerRebalanceCb  kafka.RebalanceCb                          // optional
	consumerPollTimeout  int                                        // optional
	consumerErrorHandler func(ctx context.Context, err kafka.Error) // optional
	consumerMux          sync.Mutex
	consumerIncoming     chan *kafka.Message
	consumerCtx          context.Context
	consumerCancel       context.CancelFunc

	producer             *kafka.Producer
	producerDefaultTopic string // optional

	closerMux sync.Mutex
}

func New(opts ...Option) (*Protocol, error) {
	p := &Protocol{
		consumerPollTimeout: 100,
		consumerIncoming:    make(chan *kafka.Message),
	}
	if err := p.applyOptions(opts...); err != nil {
		return nil, err
	}

	if p.kafkaConfigMap != nil {
		if p.consumerTopics != nil && p.consumer == nil {
			consumer, err := kafka.NewConsumer(p.kafkaConfigMap)
			if err != nil {
				return nil, err
			}
			p.consumer = consumer
		}
		if p.producerDefaultTopic != "" && p.producer == nil {
			producer, err := kafka.NewProducer(p.kafkaConfigMap)
			if err != nil {
				return nil, err
			}
			p.producer = producer
		}
		if p.producer == nil && p.consumer == nil {
			return nil, errors.New("at least a receiver or a sender topic must be set")
		}
	}
	if p.producerDefaultTopic != "" && p.producer == nil {
		return nil, fmt.Errorf("at least a configmap or a producer must be set for the sender topic: %s", p.producerDefaultTopic)
	}

	if len(p.consumerTopics) > 0 && p.consumer == nil {
		return nil, fmt.Errorf("at least a configmap or a consumer must be set for the receiver topics: %s", p.consumerTopics)
	}

	if p.kafkaConfigMap == nil && p.producer == nil && p.consumer == nil {
		return nil, errors.New("at least one of the following must be set to initialize the protocol: config, producer, or consumer")
	}
	return p, nil
}

// Events returns the events channel used by Confluent Kafka to deliver the result of a produce, i.e., send, operation.
// When using this SDK to produce (send) messages, this channel must be monitored to avoid resource leaks and to keep
// the channel from filling up. See the Confluent SDK for Go for details on the implementation.
func (p *Protocol) Events() (chan kafka.Event, error) {
	if p.producer == nil {
		return nil, errors.New("producer not set")
	}
	return p.producer.Events(), nil
}

func (p *Protocol) applyOptions(opts ...Option) error {
	for _, fn := range opts {
		if err := fn(p); err != nil {
			return err
		}
	}
	return nil
}

// Send sends a message via the kafka.Producer. You must monitor the Events() channel when using this function.
func (p *Protocol) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) {
	if p.producer == nil {
		return errors.New("producer client must be set")
	}

	p.closerMux.Lock()
	defer p.closerMux.Unlock()
	if p.producer.IsClosed() {
		return errors.New("producer is closed")
	}

	defer in.Finish(err)

	kafkaMsg := &kafka.Message{
		TopicPartition: kafka.TopicPartition{
			Topic:     &p.producerDefaultTopic,
			Partition: kafka.PartitionAny,
		},
	}

	if topic := cecontext.TopicFrom(ctx); topic != "" {
		kafkaMsg.TopicPartition.Topic = &topic
	}

	if messageKey := MessageKeyFrom(ctx); messageKey != "" {
		kafkaMsg.Key = []byte(messageKey)
	}

	if err = WriteProducerMessage(ctx, in, kafkaMsg, transformers...); err != nil {
		return fmt.Errorf("create producer message: %w", err)
	}

	if err = p.producer.Produce(kafkaMsg, nil); err != nil {
		return fmt.Errorf("produce message: %w", err)
	}
	return nil
}

func (p *Protocol) OpenInbound(ctx context.Context) error {
	if p.consumer == nil {
		return errors.New("the consumer client must be set")
	}
	if p.consumerTopics == nil {
		return errors.New("the consumer topics must be set")
	}

	p.consumerMux.Lock()
	defer p.consumerMux.Unlock()
	logger := cecontext.LoggerFrom(ctx)

	// Assign the consumer to the start positions, if any were provided via the context.
	if positions := TopicPartitionOffsetsFrom(ctx); positions != nil {
		if err := p.consumer.Assign(positions); err != nil {
			return err
		}
	}

	logger.Infof("Subscribing to topics: %v", p.consumerTopics)
	err := p.consumer.SubscribeTopics(p.consumerTopics, p.consumerRebalanceCb)
	if err != nil {
		return err
	}

	p.closerMux.Lock()
	p.consumerCtx, p.consumerCancel = context.WithCancel(ctx)
	defer p.consumerCancel()
	p.closerMux.Unlock()

	defer func() {
		if !p.consumer.IsClosed() {
			logger.Infof("Closing consumer %v", p.consumerTopics)
			if err = p.consumer.Close(); err != nil {
				logger.Errorf("failed to close the consumer: %v", err)
			}
		}
		close(p.consumerIncoming)
	}()

	for {
		select {
		case <-p.consumerCtx.Done():
			return p.consumerCtx.Err()
		default:
			ev := p.consumer.Poll(p.consumerPollTimeout)
			if ev == nil {
				continue
			}
			switch e := ev.(type) {
			case *kafka.Message:
				p.consumerIncoming <- e
			case kafka.Error:
				// Errors should generally be considered informational; the client will try to recover automatically.
				// Here, however, we choose to terminate the application if all brokers are down.
				logger.Infof("Error %v: %v", e.Code(), e)
				if p.consumerErrorHandler != nil {
					p.consumerErrorHandler(ctx, e)
				}
				if e.Code() == kafka.ErrAllBrokersDown {
					logger.Error("All broker connections are down")
					return e
				}
			}
		}
	}
}

// Receive implements Receiver.Receive.
func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) {
	select {
	case m, ok := <-p.consumerIncoming:
		if !ok {
			return nil, io.EOF
		}
		msg := NewMessage(m)
		return msg, nil
	case <-ctx.Done():
		return nil, io.EOF
	}
}

// Close cleans up resources after use. It must be called to properly close the underlying Kafka resources and avoid resource leaks.
func (p *Protocol) Close(ctx context.Context) error {
	p.closerMux.Lock()
	defer p.closerMux.Unlock()
	logger := cecontext.LoggerFrom(ctx)

	if p.consumerCancel != nil {
		p.consumerCancel()
	}

	if p.producer != nil && !p.producer.IsClosed() {
		// Flush and close the producer with a 10-second timeout (closes the Events channel).
		for p.producer.Flush(10000) > 0 {
			logger.Info("Flushing outstanding messages")
		}
		p.producer.Close()
	}
	return nil
}
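For context, here is a hedged sketch (not part of the vendored file) of driving Send through the CloudEvents client, while draining the Events() channel as the Send doc comment requires. The client import path, event fields, and the helper name sendOne are assumptions for illustration:

// sendOne is a sketch, assuming p was built as in the earlier example and
// cloudevents = "github.com/cloudevents/sdk-go/v2".
func sendOne(p *kafka_confluent.Protocol) {
	// Drain delivery reports so the producer's Events channel never fills up.
	events, err := p.Events()
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for range events {
		}
	}()

	c, err := cloudevents.NewClient(p)
	if err != nil {
		log.Fatal(err)
	}

	e := cloudevents.NewEvent()
	e.SetID("1")                  // placeholder id
	e.SetSource("example/source") // placeholder source
	e.SetType("example.type")     // placeholder type
	_ = e.SetData(cloudevents.ApplicationJSON, map[string]string{"hello": "kafka"})

	if result := c.Send(context.Background(), e); cloudevents.IsUndelivered(result) {
		log.Printf("send failed: %v", result)
	}
}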
125
vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/write_producer_message.go
generated
vendored
@@ -1,125 +0,0 @@
/*
 Copyright 2023 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package kafka_confluent

import (
	"bytes"
	"context"
	"io"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/binding/format"
	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/types"
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// kafkaMessageWriter extends the kafka.Message to support the interfaces for converting it to a binding.Message.
type kafkaMessageWriter kafka.Message

var (
	_ binding.StructuredWriter = (*kafkaMessageWriter)(nil)
	_ binding.BinaryWriter     = (*kafkaMessageWriter)(nil)
)

// WriteProducerMessage fills the provided kafkaMsg with the message in.
// Using the context you can tweak the encoding processing (more details in the binding.Write documentation).
func WriteProducerMessage(ctx context.Context, in binding.Message, kafkaMsg *kafka.Message,
	transformers ...binding.Transformer,
) error {
	structuredWriter := (*kafkaMessageWriter)(kafkaMsg)
	binaryWriter := (*kafkaMessageWriter)(kafkaMsg)

	_, err := binding.Write(
		ctx,
		in,
		structuredWriter,
		binaryWriter,
		transformers...,
	)
	return err
}

func (b *kafkaMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error {
	b.Headers = []kafka.Header{{
		Key:   contentTypeKey,
		Value: []byte(f.MediaType()),
	}}

	var buf bytes.Buffer
	_, err := io.Copy(&buf, event)
	if err != nil {
		return err
	}

	b.Value = buf.Bytes()
	return nil
}

func (b *kafkaMessageWriter) Start(ctx context.Context) error {
	b.Headers = []kafka.Header{}
	return nil
}

func (b *kafkaMessageWriter) End(ctx context.Context) error {
	return nil
}

func (b *kafkaMessageWriter) SetData(reader io.Reader) error {
	buf, ok := reader.(*bytes.Buffer)
	if !ok {
		buf = new(bytes.Buffer)
		_, err := io.Copy(buf, reader)
		if err != nil {
			return err
		}
	}
	b.Value = buf.Bytes()
	return nil
}

func (b *kafkaMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error {
	if attribute.Kind() == spec.DataContentType {
		if value == nil {
			b.removeProperty(contentTypeKey)
			return nil
		}
		// Propagate the addProperty error instead of silently dropping it.
		return b.addProperty(contentTypeKey, value)
	}
	key := prefix + attribute.Name()
	if value == nil {
		b.removeProperty(key)
		return nil
	}
	return b.addProperty(key, value)
}

func (b *kafkaMessageWriter) SetExtension(name string, value interface{}) error {
	if value == nil {
		// Return after removal; calling addProperty with a nil value would fail.
		b.removeProperty(prefix + name)
		return nil
	}
	return b.addProperty(prefix+name, value)
}

func (b *kafkaMessageWriter) removeProperty(key string) {
	for i, v := range b.Headers {
		if v.Key == key {
			b.Headers = append(b.Headers[:i], b.Headers[i+1:]...)
			break
		}
	}
}

func (b *kafkaMessageWriter) addProperty(key string, value interface{}) error {
	s, err := types.Format(value)
	if err != nil {
		return err
	}
	b.Headers = append(b.Headers, kafka.Header{Key: key, Value: []byte(s)})
	return nil
}
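A small standalone sketch (the topic name, event fields, and the helper name encodeOne are placeholders): WriteProducerMessage can also encode an event into a kafka.Message directly, without going through Protocol.Send:

func encodeOne() (*kafka.Message, error) {
	topic := "demo-topic" // placeholder
	e := cloudevents.NewEvent()
	e.SetID("1")
	e.SetSource("example/source")
	e.SetType("example.type")

	msg := &kafka.Message{TopicPartition: kafka.TopicPartition{
		Topic: &topic, Partition: kafka.PartitionAny,
	}}
	// binding.ToMessage wraps the event; binding.Write then picks an encoding
	// and fills msg.Headers (prefixed CloudEvents attribute headers) and
	// msg.Value (the event data).
	if err := kafka_confluent.WriteProducerMessage(context.Background(),
		binding.ToMessage(&e), msg); err != nil {
		return nil, err
	}
	return msg, nil
}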
202
vendor/github.com/confluentinc/confluent-kafka-go/v2/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
2
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/.gitignore
generated
vendored
@@ -1,2 +0,0 @@
testconf.json
go_rdkafka_generr/go_rdkafka_generr
58
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go
generated
vendored
@@ -1,58 +0,0 @@
/**
 * Copyright 2016-2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
)

/*
#include "select_rdkafka.h"

//Minimum required librdkafka version. This is checked both during
//build-time and runtime.
//Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error
//defines and strings in sync.
//

#define MIN_RD_KAFKA_VERSION 0x02030000

#ifdef __APPLE__
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif

#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
#ifdef __APPLE__
#error "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#error "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#endif
*/
import "C"

func versionCheck() error {
	ver, verstr := LibraryVersion()
	if ver < C.MIN_RD_KAFKA_VERSION {
		return newErrorFromString(ErrNotImplemented,
			fmt.Sprintf("%s: librdkafka version %s (0x%x) detected",
				C.MIN_VER_ERRSTR, verstr, ver))
	}
	return nil
}
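A hedged aside (not part of the vendored file): the same version information is available to applications through the public kafka.LibraryVersion() function; versionCheck above applies an equivalent guard inside the client itself.

ver, verstr := kafka.LibraryVersion()
log.Printf("linked against librdkafka %s (0x%x), %s", verstr, ver, kafka.LibrdkafkaLinkInfo)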
159
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md
generated
vendored
@@ -1,159 +0,0 @@
# Information for confluent-kafka-go developers

## Development process

1. Use go1.19 (and related tooling) for development on confluent-kafka-go.
2. Make sure to run `gofmt` and `go vet` on your code.
3. While there is no hard limit, try to keep your line length under 80
   characters.
4. [Test](#testing) your changes and create a PR.

NOTE: Whenever librdkafka error codes are updated, make sure to run generate
before building:

```
$ make -f mk/Makefile generr
$ go build ./...
```


## Testing

Some of the tests included in this directory, the benchmark and integration tests in particular,
require an existing Kafka cluster and a testconf.json configuration file to
provide tests with bootstrap brokers, topic name, etc.

The format of testconf.json is a JSON object:
```
{
  "Brokers": "<bootstrap-brokers>",
  "Topic": "<test-topic-name>"
}
```

See testconf-example.json for an example and the full set of available options.


To run unit tests:
```
$ go test
```

To run benchmark tests:
```
$ go test -bench .
```

For the code coverage:
```
$ go test -coverprofile=coverage.out -bench=.
$ go tool cover -func=coverage.out
```


## Build tags

Different build types are supported through Go build tags (`-tags ..`);
these tags should be specified on the **application** build/get/install command.

 * By default the bundled platform-specific static build of librdkafka will
   be used. This works out of the box on Mac OSX and glibc-based Linux distros,
   such as Ubuntu and CentOS.
 * `-tags musl` - must be specified when building on/for musl-based Linux
   distros, such as Alpine. Will use the bundled static musl build of
   librdkafka.
 * `-tags dynamic` - link librdkafka dynamically. A shared librdkafka library
   must be installed manually through other means (apt-get, yum, build from
   source, etc).


## Release process

For each release candidate and final release, perform the following steps:

### Review the CHANGELOG

### Update bundle to latest librdkafka

See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md).

### Update librdkafka version requirement

Update the minimum required librdkafka version in `kafka/00version.go`
and `README.md`, and the version in `examples/go.mod` and `mk/doc-gen.py`.

### Update error codes

Error codes can be automatically generated from the current librdkafka version.

Update generated error codes:

    $ make -f mk/Makefile generr
    # Verify by building


## Generating HTML documentation

To generate one-page HTML documentation, run the mk/doc-gen.py script from the
top-level directory. This script requires the beautifulsoup4 Python package.

```
$ source .../your/virtualenv/bin/activate
$ pip install beautifulsoup4
...
$ make -f mk/Makefile docs
```


### Rebuild everything

    $ go clean -i ./...
    $ go build ./...


### Run full test suite

Set up a test cluster using whatever mechanism you typically use
(docker, trivup, ccloud, ..).

Make sure to update `kafka/testconf.json` as needed (broker list, $BROKERS).

Run the test suite:

    $ go test ./...


### Verify examples

Manually verify that the examples/ applications work.

Also make sure the examples in README.md work.

### Commit any changes

Make sure to push to github before creating the tag, so that the CI tests pass.


### Create and push tag

    $ git tag v1.3.0
    $ git push --dry-run origin v1.3.0
    # Remove --dry-run and re-execute if it looks ok.


### Create release notes page on github

### Update version in Confluent docs

Put the new version in settings.sh of these two repos:

https://github.com/confluentinc/docs

https://github.com/confluentinc/docs-platform

### Don't forget tweeting it!
3463
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go
generated
vendored
File diff suppressed because it is too large
587
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go
generated
vendored
@@ -1,587 +0,0 @@
/**
 * Copyright 2018 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
	"time"
	"unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value of 0 returns immediately after triggering topic
// creation, while > 0 waits this long for topic creation to propagate
// in the cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
type AdminOptionOperationTimeout struct {
	isSet bool
	val   time.Duration
}

func (ao AdminOptionOperationTimeout) supportsCreateTopics()     {}
func (ao AdminOptionOperationTimeout) supportsDeleteTopics()     {}
func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {}

func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
		cOptions, C.int(durationToMilliseconds(ao.val)),
		cErrstr, cErrstrSize)
	if cErr != 0 {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newCErrorFromString(cErr,
			fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
	}

	return nil
}

// SetAdminOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value of 0 returns immediately after triggering topic
// creation, while > 0 waits this long for topic creation to propagate
// in the cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) {
	ao.isSet = true
	ao.val = t
	return ao
}

// AdminOptionRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
type AdminOptionRequestTimeout struct {
	isSet bool
	val   time.Duration
}

func (ao AdminOptionRequestTimeout) supportsCreateTopics()                 {}
func (ao AdminOptionRequestTimeout) supportsDeleteTopics()                 {}
func (ao AdminOptionRequestTimeout) supportsCreatePartitions()             {}
func (ao AdminOptionRequestTimeout) supportsAlterConfigs()                 {}
func (ao AdminOptionRequestTimeout) supportsDescribeConfigs()              {}
func (ao AdminOptionRequestTimeout) supportsCreateACLs()                   {}
func (ao AdminOptionRequestTimeout) supportsDescribeACLs()                 {}
func (ao AdminOptionRequestTimeout) supportsDeleteACLs()                   {}
func (ao AdminOptionRequestTimeout) supportsListConsumerGroups()           {}
func (ao AdminOptionRequestTimeout) supportsDescribeConsumerGroups()       {}
func (ao AdminOptionRequestTimeout) supportsDescribeTopics()               {}
func (ao AdminOptionRequestTimeout) supportsDescribeCluster()              {}
func (ao AdminOptionRequestTimeout) supportsDeleteConsumerGroups()         {}
func (ao AdminOptionRequestTimeout) supportsListConsumerGroupOffsets()     {}
func (ao AdminOptionRequestTimeout) supportsAlterConsumerGroupOffsets()    {}
func (ao AdminOptionRequestTimeout) supportsListOffsets()                  {}
func (ao AdminOptionRequestTimeout) supportsDescribeUserScramCredentials() {}
func (ao AdminOptionRequestTimeout) supportsAlterUserScramCredentials()    {}

func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_AdminOptions_set_request_timeout(
		cOptions, C.int(durationToMilliseconds(ao.val)),
		cErrstr, cErrstrSize)
	if cErr != 0 {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newCErrorFromString(cErr,
			fmt.Sprintf("%s", C.GoString(cErrstr)))
	}

	return nil
}

// SetAdminRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) {
	ao.isSet = true
	ao.val = t
	return ao
}

// IsolationLevel is a type which is used for AdminOptions to set the IsolationLevel.
type IsolationLevel int

const (
	// IsolationLevelReadUncommitted - read uncommitted isolation level
	IsolationLevelReadUncommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED)
	// IsolationLevelReadCommitted - read committed isolation level
	IsolationLevelReadCommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED)
)

// AdminOptionIsolationLevel sets the overall request IsolationLevel.
//
// Default: `ReadUncommitted`.
//
// Valid for ListOffsets.
type AdminOptionIsolationLevel struct {
	isSet bool
	val   IsolationLevel
}

func (ao AdminOptionIsolationLevel) supportsListOffsets() {}

func (ao AdminOptionIsolationLevel) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cError := C.rd_kafka_AdminOptions_set_isolation_level(
		cOptions, C.rd_kafka_IsolationLevel_t(ao.val))
	if cError != nil {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// SetAdminIsolationLevel sets the overall IsolationLevel for a request.
//
// Default: `ReadUncommitted`.
//
// Valid for ListOffsets.
func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel) {
	ao.isSet = true
	ao.val = isolationLevel
	return ao
}

// AdminOptionValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, CreatePartitions, AlterConfigs.
type AdminOptionValidateOnly struct {
	isSet bool
	val   bool
}

func (ao AdminOptionValidateOnly) supportsCreateTopics()     {}
func (ao AdminOptionValidateOnly) supportsCreatePartitions() {}
func (ao AdminOptionValidateOnly) supportsAlterConfigs()     {}

func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_AdminOptions_set_validate_only(
		cOptions, bool2cint(ao.val),
		cErrstr, cErrstrSize)
	if cErr != 0 {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newCErrorFromString(cErr,
			fmt.Sprintf("%s", C.GoString(cErrstr)))
	}

	return nil
}

// SetAdminValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, CreatePartitions, AlterConfigs.
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) {
	ao.isSet = true
	ao.val = validateOnly
	return ao
}

// AdminOptionRequireStableOffsets decides if the broker should return stable
// offsets (transaction-committed).
//
// Default: false
//
// Valid for ListConsumerGroupOffsets.
type AdminOptionRequireStableOffsets struct {
	isSet bool
	val   bool
}

func (ao AdminOptionRequireStableOffsets) supportsListConsumerGroupOffsets() {}

func (ao AdminOptionRequireStableOffsets) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cError := C.rd_kafka_AdminOptions_set_require_stable_offsets(
		cOptions, bool2cint(ao.val))
	if cError != nil {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// SetAdminRequireStableOffsets decides if the broker should return stable
// offsets (transaction-committed).
//
// Default: false
//
// Valid for ListConsumerGroupOffsets.
func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets) {
	ao.isSet = true
	ao.val = val
	return ao
}

// AdminOptionMatchConsumerGroupStates decides in which state(s) the listed
// groups should be.
//
// Default: nil (lists groups in all states).
//
// Valid for ListConsumerGroups.
type AdminOptionMatchConsumerGroupStates struct {
	isSet bool
	val   []ConsumerGroupState
}

func (ao AdminOptionMatchConsumerGroupStates) supportsListConsumerGroups() {}

func (ao AdminOptionMatchConsumerGroupStates) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet || ao.val == nil {
		return nil
	}

	// Convert states from Go slice to C pointer.
	cStates := make([]C.rd_kafka_consumer_group_state_t, len(ao.val))
	cStatesCount := C.size_t(len(ao.val))

	for idx, state := range ao.val {
		cStates[idx] = C.rd_kafka_consumer_group_state_t(state)
	}

	cStatesPtr := ((*C.rd_kafka_consumer_group_state_t)(&cStates[0]))
	cError := C.rd_kafka_AdminOptions_set_match_consumer_group_states(
		cOptions, cStatesPtr, cStatesCount)
	if cError != nil {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// AdminOptionIncludeAuthorizedOperations decides if the broker should return
// authorized operations.
//
// Default: false
//
// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster.
type AdminOptionIncludeAuthorizedOperations struct {
	isSet bool
	val   bool
}

func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeConsumerGroups() {}
func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeTopics()         {}
func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeCluster()        {}

func (ao AdminOptionIncludeAuthorizedOperations) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cError := C.rd_kafka_AdminOptions_set_include_authorized_operations(
		cOptions, bool2cint(ao.val))
	if cError != nil {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// SetAdminOptionIncludeAuthorizedOperations decides if the broker should return
// authorized operations.
//
// Default: false
//
// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster.
func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations) {
	ao.isSet = true
	ao.val = val
	return ao
}

// SetAdminMatchConsumerGroupStates decides in which state(s) the listed
// groups should be.
//
// Default: nil (lists groups in all states).
//
// Valid for ListConsumerGroups.
func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates) {
	ao.isSet = true
	ao.val = val
	return ao
}

// CreateTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreateTopicsAdminOption interface {
	supportsCreateTopics()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DeleteTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout.
type DeleteTopicsAdminOption interface {
	supportsDeleteTopics()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// CreatePartitionsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreatePartitionsAdminOption interface {
	supportsCreatePartitions()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AlterConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental.
type AlterConfigsAdminOption interface {
	supportsAlterConfigs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout.
type DescribeConfigsAdminOption interface {
	supportsDescribeConfigs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// CreateACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type CreateACLsAdminOption interface {
	supportsCreateACLs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type DescribeACLsAdminOption interface {
	supportsDescribeACLs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DeleteACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type DeleteACLsAdminOption interface {
	supportsDeleteACLs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// ListConsumerGroupsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminMatchConsumerGroupStates.
type ListConsumerGroupsAdminOption interface {
	supportsListConsumerGroups()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeConsumerGroupsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
type DescribeConsumerGroupsAdminOption interface {
	supportsDescribeConsumerGroups()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeTopicsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
type DescribeTopicsAdminOption interface {
	supportsDescribeTopics()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeClusterAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
type DescribeClusterAdminOption interface {
	supportsDescribeCluster()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DeleteConsumerGroupsAdminOption - see setters.
//
// See SetAdminRequestTimeout.
type DeleteConsumerGroupsAdminOption interface {
	supportsDeleteConsumerGroups()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// ListConsumerGroupOffsetsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminRequireStableOffsets.
type ListConsumerGroupOffsetsAdminOption interface {
	supportsListConsumerGroupOffsets()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AlterConsumerGroupOffsetsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type AlterConsumerGroupOffsetsAdminOption interface {
	supportsAlterConsumerGroupOffsets()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeUserScramCredentialsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type DescribeUserScramCredentialsAdminOption interface {
	supportsDescribeUserScramCredentials()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AlterUserScramCredentialsAdminOption - see setter.
//
// See SetAdminRequestTimeout.
type AlterUserScramCredentialsAdminOption interface {
	supportsAlterUserScramCredentials()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// ListOffsetsAdminOption - see setter.
//
// See SetAdminRequestTimeout, SetAdminIsolationLevel.
type ListOffsetsAdminOption interface {
	supportsListOffsets()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AdminOption is a generic type not to be used directly.
//
// See CreateTopicsAdminOption et al.
type AdminOption interface {
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
	cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
	for _, opt := range options {
		if opt == nil {
			continue
		}
		err := opt.apply(cOptions)
		if err != nil {
			return nil, err
		}
	}

	return cOptions, nil
}
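As a hedged usage sketch (broker and topic names are placeholders), these option setters are passed variadically to AdminClient calls such as CreateTopics:

a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
if err != nil {
	log.Fatal(err)
}
defer a.Close()

results, err := a.CreateTopics(
	context.Background(),
	[]kafka.TopicSpecification{{Topic: "demo-topic", NumPartitions: 3, ReplicationFactor: 1}},
	kafka.SetAdminRequestTimeout(30*time.Second),   // overall request timeout
	kafka.SetAdminOperationTimeout(60*time.Second), // wait for propagation on the controller
)
if err != nil {
	log.Fatal(err)
}
for _, r := range results {
	log.Printf("%s: %v", r.Topic, r.Error)
}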
7595
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html
generated
vendored
File diff suppressed because it is too large
13
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go
generated
vendored
@@ -1,13 +0,0 @@
// +build !dynamic

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_amd64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v2.3.0.tgz"
13
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go
generated
vendored
@@ -1,13 +0,0 @@
// +build !dynamic

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_arm64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v2.3.0.tgz"
10
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go
generated
vendored
@@ -1,10 +0,0 @@
//go:build dynamic
// +build dynamic

package kafka

// #cgo pkg-config: rdkafka
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "dynamically linked to librdkafka"
13
vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go
generated
vendored
@@ -1,13 +0,0 @@
// +build !dynamic
// +build !musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_amd64.a -lm -ldl -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz"
13  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go (generated, vendored)
@@ -1,13 +0,0 @@
// +build !dynamic
// +build !musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_arm64.a -lm -ldl -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux_arm64 from librdkafka-static-bundle-v2.3.0.tgz"
13  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go (generated, vendored)
@@ -1,13 +0,0 @@
// +build !dynamic
// +build musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_amd64.a -lm -ldl -lpthread -lrt -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz"
13  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go (generated, vendored)
@@ -1,13 +0,0 @@
// +build !dynamic
// +build musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_arm64.a -lm -ldl -lpthread -lrt -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux_arm64 from librdkafka-static-bundle-v2.3.0.tgz"
13  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go (generated, vendored)
@@ -1,13 +0,0 @@
// +build !dynamic


// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v2.3.0.tgz"
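The build_*.go files above each define LibrdkafkaLinkInfo for one platform and build-tag combination (the `dynamic` and `musl` tags select the dynamically linked and musl-libc variants at `go build` time). A minimal sketch of inspecting which variant was compiled in; the output naturally depends on how the binary was built:

package main

import (
    "fmt"

    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
    // Exactly one build_*.go file is compiled in per GOOS/GOARCH and the
    // "dynamic"/"musl" build tags, so this constant reports how librdkafka
    // was linked.
    fmt.Println(kafka.LibrdkafkaLinkInfo)

    // LibraryVersion reports the librdkafka version at runtime.
    ver, verstr := kafka.LibraryVersion()
    fmt.Printf("librdkafka %s (0x%x)\n", verstr, ver)
}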
299  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go (generated, vendored)
@@ -1,299 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
    "fmt"
    "go/types"
    "reflect"
    "strings"
    "unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"

// ConfigValue supports the following types:
//
// bool, int, string, any type with the standard String() interface
type ConfigValue interface{}

// ConfigMap is a map containing standard librdkafka configuration properties as documented in:
// https://github.com/confluentinc/librdkafka/tree/master/CONFIGURATION.md
//
// The special property "default.topic.config" (optional) is a ConfigMap
// containing default topic configuration properties.
//
// The use of "default.topic.config" is deprecated,
// topic configuration properties shall be specified in the standard ConfigMap.
// For backwards compatibility, "default.topic.config" (if supplied)
// takes precedence.
type ConfigMap map[string]ConfigValue

// SetKey sets configuration property key to value.
//
// For user convenience a key prefixed with {topic}. will be
// set on the "default.topic.config" sub-map, this use is deprecated.
func (m ConfigMap) SetKey(key string, value ConfigValue) error {
    if strings.HasPrefix(key, "{topic}.") {
        _, found := m["default.topic.config"]
        if !found {
            m["default.topic.config"] = ConfigMap{}
        }
        m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value
    } else {
        m[key] = value
    }

    return nil
}

// Set implements flag.Set (command line argument parser) as a convenience
// for `-X key=value` config.
func (m ConfigMap) Set(kv string) error {
    i := strings.Index(kv, "=")
    if i == -1 {
        return newErrorFromString(ErrInvalidArg, "Expected key=value")
    }

    k := kv[:i]
    v := kv[i+1:]

    return m.SetKey(k, v)
}

func value2string(v ConfigValue) (ret string, errstr string) {

    errstr = ""
    switch x := v.(type) {
    case bool:
        if x {
            ret = "true"
        } else {
            ret = "false"
        }
    case int:
        ret = fmt.Sprintf("%d", x)
    case string:
        ret = x
    case types.Slice:
        ret = ""
        arr := v.([]ConfigValue)
        for _, i := range arr {
            temp, err := value2string(i)
            if err != "" {
                ret = ""
                errstr = fmt.Sprintf("Invalid value type %T", v)
                break
            }
            ret += temp + ","
        }
        if len(ret) != 0 {
            ret = ret[:len(ret)-1]
        }
    case fmt.Stringer:
        ret = x.String()
    default:
        ret = ""
        errstr = fmt.Sprintf("Invalid value type %T", v)
    }

    return ret, errstr
}

// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t
// into a common interface.
type rdkAnyconf interface {
    set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t
}

func anyconfSet(anyconf rdkAnyconf, key string, val ConfigValue) (err error) {
    value, errstr := value2string(val)
    if errstr != "" {
        return newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, key))
    }
    cKey := C.CString(key)
    defer C.free(unsafe.Pointer(cKey))
    cVal := C.CString(value)
    defer C.free(unsafe.Pointer(cVal))
    cErrstr := (*C.char)(C.malloc(C.size_t(128)))
    defer C.free(unsafe.Pointer(cErrstr))

    if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK {
        return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
    }

    return nil
}

// we need these typedefs to workaround a crash in golint
// when parsing the set() methods below
type rdkConf C.rd_kafka_conf_t
type rdkTopicConf C.rd_kafka_topic_conf_t

func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
    return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}

func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
    return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}

func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) {
    // set plugins first, any plugin-specific configuration depends on
    // the plugin to have already been set
    pluginPaths, ok := m["plugin.library.paths"]
    if ok {
        err = anyconfSet(anyconf, "plugin.library.paths", pluginPaths)
        if err != nil {
            return err
        }
    }
    for k, v := range m {
        if k == "plugin.library.paths" {
            continue
        }
        switch v.(type) {
        case ConfigMap:
            /* Special sub-ConfigMap, only used for default.topic.config */

            if k != "default.topic.config" {
                return newErrorFromString(ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k))
            }

            var cTopicConf = C.rd_kafka_topic_conf_new()

            err = configConvertAnyconf(v.(ConfigMap),
                (*rdkTopicConf)(cTopicConf))
            if err != nil {
                C.rd_kafka_topic_conf_destroy(cTopicConf)
                return err
            }

            C.rd_kafka_conf_set_default_topic_conf(
                (*C.rd_kafka_conf_t)(anyconf.(*rdkConf)),
                (*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf)))

        default:
            err = anyconfSet(anyconf, k, v)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// convert ConfigMap to C rd_kafka_conf_t *
func (m ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) {
    cConf = C.rd_kafka_conf_new()

    // Set the client.software.name and .version (use librdkafka version).
    _, librdkafkaVersion := LibraryVersion()
    anyconfSet((*rdkConf)(cConf), "client.software.name", "confluent-kafka-go")
    anyconfSet((*rdkConf)(cConf), "client.software.version", librdkafkaVersion)

    err = configConvertAnyconf(m, (*rdkConf)(cConf))
    if err != nil {
        C.rd_kafka_conf_destroy(cConf)
        return nil, err
    }
    return cConf, nil
}

// get finds key in the configmap and returns its value.
// If the key is not found defval is returned.
// If the key is found but the type is mismatched an error is returned.
func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) {
    if strings.HasPrefix(key, "{topic}.") {
        defconfCv, found := m["default.topic.config"]
        if !found {
            return defval, nil
        }
        return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval)
    }

    v, ok := m[key]
    if !ok {
        return defval, nil
    }

    if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) {
        return nil, newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v))
    }

    return v, nil
}

// extract performs a get() and if found deletes the key.
func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) {

    v, err := m.get(key, defval)
    if err != nil {
        return nil, err
    }

    delete(m, key)

    return v, nil
}

// extractLogConfig extracts generic go.logs.* configuration properties.
func (m ConfigMap) extractLogConfig() (logsChanEnable bool, logsChan chan LogEvent, err error) {
    v, err := m.extract("go.logs.channel.enable", false)
    if err != nil {
        return
    }

    logsChanEnable = v.(bool)

    v, err = m.extract("go.logs.channel", nil)
    if err != nil {
        return
    }

    if v != nil {
        logsChan = v.(chan LogEvent)
    }

    if logsChanEnable {
        // Tell librdkafka to forward logs to the log queue
        m.Set("log.queue=true")
    }

    return
}

func (m ConfigMap) clone() ConfigMap {
    m2 := make(ConfigMap)
    for k, v := range m {
        m2[k] = v
    }
    return m2
}

// Get finds the given key in the ConfigMap and returns its value.
// If the key is not found `defval` is returned.
// If the key is found but the type does not match that of `defval` (unless nil)
// an ErrInvalidArg error is returned.
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) {
    return m.get(key, defval)
}
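As a usage note for the ConfigMap API removed above, a small self-contained sketch; the broker address, group id, and property values are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
    cfg := &kafka.ConfigMap{
        "bootstrap.servers": "localhost:9092", // placeholder address
        "group.id":          "demo-group",
    }

    // SetKey and Set add or override properties after construction;
    // Set parses the "-X key=value" command-line form.
    if err := cfg.SetKey("session.timeout.ms", 6000); err != nil {
        log.Fatal(err)
    }
    if err := cfg.Set("auto.offset.reset=earliest"); err != nil {
        log.Fatal(err)
    }

    // Get performs the type check against defval shown in get() above.
    v, err := cfg.Get("group.id", "")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("group.id =", v)
}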
1103  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go (generated, vendored)
File diff suppressed because it is too large
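Since the consumer.go diff is suppressed, here is a minimal sketch of the consumer API that file implements, for orientation only; the broker address, group id, and topic are placeholders:

package main

import (
    "log"

    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
    c, err := kafka.NewConsumer(&kafka.ConfigMap{
        "bootstrap.servers": "localhost:9092",
        "group.id":          "demo-group",
        "auto.offset.reset": "earliest",
    })
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    if err := c.SubscribeTopics([]string{"demo-topic"}, nil); err != nil {
        log.Fatal(err)
    }

    for i := 0; i < 10; i++ {
        // ReadMessage wraps Poll and returns only messages or errors.
        msg, err := c.ReadMessage(-1)
        if err != nil {
            log.Printf("consumer error: %v", err)
            continue
        }
        log.Printf("%s: %s", msg.TopicPartition, string(msg.Value))
    }
}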
31  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/context.go (generated, vendored)
@@ -1,31 +0,0 @@
/**
 * Copyright 2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
    "context"
    "time"
)

// Timeout returns the remaining time after which work done on behalf of this context should be
// canceled, or ok==false if no deadline/timeout is set.
func timeout(ctx context.Context) (timeout time.Duration, ok bool) {
    if deadline, ok := ctx.Deadline(); ok {
        return deadline.Sub(time.Now()), true
    }
    return 0, false
}
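The timeout helper converts a context deadline into the remaining duration that can be handed to librdkafka's millisecond-based APIs. A standalone sketch of the same conversion; the helper itself is unexported, so this re-states it rather than calling it:

package main

import (
    "context"
    "fmt"
    "time"
)

// timeout mirrors the helper above: it converts a context deadline into a
// remaining duration, or ok==false when the context has no deadline.
func timeout(ctx context.Context) (time.Duration, bool) {
    if deadline, ok := ctx.Deadline(); ok {
        return time.Until(deadline), true
    }
    return 0, false
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    if d, ok := timeout(ctx); ok {
        fmt.Printf("about %v left before the API call must give up\n", d.Round(time.Millisecond))
    }
}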
169  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go (generated, vendored)
@@ -1,169 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"

import (
    "fmt"
    "unsafe"
)

// Error provides a Kafka-specific error container
type Error struct {
    code             ErrorCode
    str              string
    fatal            bool
    retriable        bool
    txnRequiresAbort bool
}

func newError(code C.rd_kafka_resp_err_t) (err Error) {
    return Error{code: ErrorCode(code)}
}

// NewError creates a new Error.
func NewError(code ErrorCode, str string, fatal bool) (err Error) {
    return Error{code: code, str: str, fatal: fatal}
}

func newErrorFromString(code ErrorCode, str string) (err Error) {
    return Error{code: code, str: str}
}

func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) {
    var str string
    if cstr != nil {
        str = C.GoString(cstr)
    } else {
        str = ""
    }
    return Error{code: ErrorCode(code), str: str}
}

func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) {
    return newErrorFromString(ErrorCode(code), str)
}

// newErrorFromCError creates a new Error instance
func newErrorFromCError(cError *C.rd_kafka_error_t) Error {
    return Error{
        code:             ErrorCode(C.rd_kafka_error_code(cError)),
        str:              C.GoString(C.rd_kafka_error_string(cError)),
        fatal:            cint2bool(C.rd_kafka_error_is_fatal(cError)),
        retriable:        cint2bool(C.rd_kafka_error_is_retriable(cError)),
        txnRequiresAbort: cint2bool(C.rd_kafka_error_txn_requires_abort(cError)),
    }
}

// newErrorFromCErrorDestroy creates a new Error instance and destroys
// the passed cError.
func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
    defer C.rd_kafka_error_destroy(cError)
    return newErrorFromCError(cError)
}

// Error returns a human readable representation of an Error
// Same as Error.String()
func (e Error) Error() string {
    return e.String()
}

// String returns a human readable representation of an Error
func (e Error) String() string {
    var errstr string
    if len(e.str) > 0 {
        errstr = e.str
    } else {
        errstr = e.code.String()
    }

    if e.IsFatal() {
        return fmt.Sprintf("Fatal error: %s", errstr)
    }

    return errstr
}

// Code returns the ErrorCode of an Error
func (e Error) Code() ErrorCode {
    return e.code
}

// IsFatal returns true if the error is a fatal error.
// A fatal error indicates the client instance is no longer operable and
// should be terminated. Typical causes include non-recoverable
// idempotent producer errors.
func (e Error) IsFatal() bool {
    return e.fatal
}

// IsRetriable returns true if the operation that caused this error
// may be retried.
// This flag is currently only set by the Transactional producer API.
func (e Error) IsRetriable() bool {
    return e.retriable
}

// IsTimeout returns true if the error is a timeout error.
// A timeout error indicates that the operation timed out locally.
func (e Error) IsTimeout() bool {
    return e.code == ErrTimedOut || e.code == ErrTimedOutQueue
}

// TxnRequiresAbort returns true if the error is an abortable transaction error
// that requires the application to abort the current transaction with
// AbortTransaction() and start a new transaction with BeginTransaction()
// if it wishes to proceed with transactional operations.
// This flag is only set by the Transactional producer API.
func (e Error) TxnRequiresAbort() bool {
    return e.txnRequiresAbort
}

// getFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func getFatalError(H Handle) error {
    cErrstr := (*C.char)(C.malloc(C.size_t(512)))
    defer C.free(unsafe.Pointer(cErrstr))

    cErr := C.rd_kafka_fatal_error(H.gethandle().rk, cErrstr, 512)
    if int(cErr) == 0 {
        return nil
    }

    err := newErrorFromCString(cErr, cErrstr)
    err.fatal = true

    return err
}

// testFatalError triggers a fatal error in the underlying client.
// This is to be used strictly for testing purposes.
func testFatalError(H Handle, code ErrorCode, str string) ErrorCode {
    return ErrorCode(C.rd_kafka_test_fatal_error(H.gethandle().rk, C.rd_kafka_resp_err_t(code), C.CString(str)))
}

func getOperationNotAllowedErrorForClosedClient() error {
    return newErrorFromString(ErrState, "Operation not allowed on closed client")
}
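A sketch of how callers are expected to consume the Error type above: unwrap with errors.As, then branch on the flag accessors. The error constructed at the bottom is illustrative only:

package main

import (
    "errors"
    "log"

    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// handleKafkaError shows the intended consumption pattern for kafka.Error:
// unwrap, then branch on the fatal/retriable/timeout flags.
func handleKafkaError(err error) {
    var kerr kafka.Error
    if !errors.As(err, &kerr) {
        log.Printf("non-kafka error: %v", err)
        return
    }
    switch {
    case kerr.IsFatal():
        // Client instance is no longer usable.
        log.Fatalf("fatal: %s (code %d)", kerr, kerr.Code())
    case kerr.IsRetriable():
        log.Printf("retriable: %s", kerr)
    case kerr.IsTimeout():
        log.Printf("local timeout: %s", kerr)
    default:
        log.Printf("error: %s", kerr)
    }
}

func main() {
    // Illustrative only: construct an error the way tests might.
    handleKafkaError(kafka.NewError(kafka.ErrTimedOut, "operation timed out", false))
}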
112  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go (generated, vendored)
@@ -1,112 +0,0 @@
/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

/*
#include <stdlib.h>
#include "select_rdkafka.h"

static const char *errdesc_to_string (const struct rd_kafka_err_desc *ed, int idx) {
    return ed[idx].name;
}

static const char *errdesc_to_desc (const struct rd_kafka_err_desc *ed, int idx) {
    return ed[idx].desc;
}
*/
import "C"

import (
    "fmt"
    "os"
    "strings"
    "time"
)

// camelCase transforms a snake_case string to camelCase.
func camelCase(s string) string {
    ret := ""
    for _, v := range strings.Split(s, "_") {
        if len(v) == 0 {
            continue
        }
        ret += strings.ToUpper((string)(v[0])) + strings.ToLower(v[1:])
    }
    return ret
}

// WriteErrorCodes writes Go error code constants to file from the
// librdkafka error codes.
// This function is not intended for public use.
func WriteErrorCodes(f *os.File) {
    f.WriteString("package kafka\n\n")
    now := time.Now()
    f.WriteString(fmt.Sprintf("// Copyright 2016-%d Confluent Inc.\n", now.Year()))
    f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED ON %v USING librdkafka %s\n",
        now, C.GoString(C.rd_kafka_version_str())))

    var errdescs *C.struct_rd_kafka_err_desc
    var csize C.size_t
    C.rd_kafka_get_err_descs(&errdescs, &csize)

    f.WriteString(`
/*
#include "select_rdkafka.h"
*/
import "C"

// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int

// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
    return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}

const (
`)

    for i := 0; i < int(csize); i++ {
        orig := C.GoString(C.errdesc_to_string(errdescs, C.int(i)))
        if len(orig) == 0 {
            continue
        }
        desc := C.GoString(C.errdesc_to_desc(errdescs, C.int(i)))
        if len(desc) == 0 {
            continue
        }

        errname := "Err" + camelCase(orig)

        // Special handling to please golint
        // Eof -> EOF
        // Id -> ID
        errname = strings.Replace(errname, "Eof", "EOF", -1)
        errname = strings.Replace(errname, "Id", "ID", -1)

        f.WriteString(fmt.Sprintf("\t// %s %s\n", errname, desc))
        f.WriteString(fmt.Sprintf("\t%s ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n",
            errname, orig))
    }

    f.WriteString(")\n")

}
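To make the generator's naming rule concrete, a standalone sketch that re-states camelCase and the golint rewrites from WriteErrorCodes; the sample inputs are illustrative:

package main

import (
    "fmt"
    "strings"
)

// camelCase mirrors the generator's transform: split on underscores,
// then capitalize each fragment.
func camelCase(s string) string {
    ret := ""
    for _, v := range strings.Split(s, "_") {
        if len(v) == 0 {
            continue
        }
        ret += strings.ToUpper(string(v[0])) + strings.ToLower(v[1:])
    }
    return ret
}

func main() {
    for _, orig := range []string{"_PARTITION_EOF", "INVALID_GROUP_ID"} {
        errname := "Err" + camelCase(orig)
        // Same golint-driven rewrites as WriteErrorCodes above.
        errname = strings.Replace(errname, "Eof", "EOF", -1)
        errname = strings.Replace(errname, "Id", "ID", -1)
        fmt.Println(orig, "->", errname) // e.g. _PARTITION_EOF -> ErrPartitionEOF
    }
}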
316  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/event.go (generated, vendored)
@@ -1,316 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
    "fmt"
    "os"
    "unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"


void chdrs_to_tmphdrs (glue_msg_t *gMsg) {
    size_t i = 0;
    const char *name;
    const void *val;
    size_t size;
    rd_kafka_headers_t *chdrs;

    if (rd_kafka_message_headers(gMsg->msg, &chdrs)) {
        gMsg->tmphdrs = NULL;
        gMsg->tmphdrsCnt = 0;
        return;
    }

    gMsg->tmphdrsCnt = rd_kafka_header_cnt(chdrs);
    gMsg->tmphdrs = malloc(sizeof(*gMsg->tmphdrs) * gMsg->tmphdrsCnt);

    while (!rd_kafka_header_get_all(chdrs, i,
                                    &gMsg->tmphdrs[i].key,
                                    &gMsg->tmphdrs[i].val,
                                    (size_t *)&gMsg->tmphdrs[i].size))
        i++;
}

rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs,
                                  rd_kafka_event_type_t *evtype,
                                  glue_msg_t *gMsg,
                                  rd_kafka_event_t *prev_rkev) {
    rd_kafka_event_t *rkev;

    if (prev_rkev)
        rd_kafka_event_destroy(prev_rkev);

    rkev = rd_kafka_queue_poll(rkq, timeoutMs);
    *evtype = rd_kafka_event_type(rkev);

    if (*evtype == RD_KAFKA_EVENT_FETCH) {
        gMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev);
        gMsg->ts = rd_kafka_message_timestamp(gMsg->msg, &gMsg->tstype);

        if (gMsg->want_hdrs)
            chdrs_to_tmphdrs(gMsg);
    }

    return rkev;
}
*/
import "C"

func chdrsToTmphdrs(gMsg *C.glue_msg_t) {
    C.chdrs_to_tmphdrs(gMsg)
}

// Event generic interface
type Event interface {
    // String returns a human-readable representation of the event
    String() string
}

// Specific event types

// Stats statistics event
type Stats struct {
    statsJSON string
}

func (e Stats) String() string {
    return e.statsJSON
}

// AssignedPartitions consumer group rebalance event: assigned partition set
type AssignedPartitions struct {
    Partitions []TopicPartition
}

func (e AssignedPartitions) String() string {
    return fmt.Sprintf("AssignedPartitions: %v", e.Partitions)
}

// RevokedPartitions consumer group rebalance event: revoked partition set
type RevokedPartitions struct {
    Partitions []TopicPartition
}

func (e RevokedPartitions) String() string {
    return fmt.Sprintf("RevokedPartitions: %v", e.Partitions)
}

// PartitionEOF consumer reached end of partition
// Needs to be explicitly enabled by setting the `enable.partition.eof`
// configuration property to true.
type PartitionEOF TopicPartition

func (p PartitionEOF) String() string {
    return fmt.Sprintf("EOF at %s", TopicPartition(p))
}

// OffsetsCommitted reports committed offsets
type OffsetsCommitted struct {
    Error   error
    Offsets []TopicPartition
}

func (o OffsetsCommitted) String() string {
    return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets)
}

// OAuthBearerTokenRefresh indicates token refresh is required
type OAuthBearerTokenRefresh struct {
    // Config is the value of the sasl.oauthbearer.config property
    Config string
}

func (o OAuthBearerTokenRefresh) String() string {
    return "OAuthBearerTokenRefresh"
}

// eventPoll polls an event from the handler's C rd_kafka_queue_t,
// translates it into an Event type and then sends on `channel` if non-nil, else returns the Event.
// term_chan is an optional channel to monitor along with producing to channel
// to indicate that `channel` is being terminated.
// returns (event Event, terminate Bool) tuple, where Terminate indicates
// if termChan received a termination event.
func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) {

    var prevRkev *C.rd_kafka_event_t
    term := false

    var retval Event

    if channel == nil {
        maxEvents = 1
    }
out:
    for evcnt := 0; evcnt < maxEvents; evcnt++ {
        var evtype C.rd_kafka_event_type_t
        var gMsg C.glue_msg_t
        gMsg.want_hdrs = C.int8_t(bool2cint(h.msgFields.Headers))
        rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &gMsg, prevRkev)
        prevRkev = rkev
        timeoutMs = 0

        retval = nil

        switch evtype {
        case C.RD_KAFKA_EVENT_FETCH:
            // Consumer fetch event, new message.
            // Extracted into temporary gMsg for optimization
            retval = h.newMessageFromGlueMsg(&gMsg)

        case C.RD_KAFKA_EVENT_REBALANCE:
            // Consumer rebalance event
            retval = h.c.handleRebalanceEvent(channel, rkev)

        case C.RD_KAFKA_EVENT_ERROR:
            // Error event
            cErr := C.rd_kafka_event_error(rkev)
            if cErr == C.RD_KAFKA_RESP_ERR__PARTITION_EOF {
                crktpar := C.rd_kafka_event_topic_partition(rkev)
                if crktpar == nil {
                    break
                }

                defer C.rd_kafka_topic_partition_destroy(crktpar)
                var peof PartitionEOF
                setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar)

                retval = peof

            } else if int(C.rd_kafka_event_error_is_fatal(rkev)) != 0 {
                // A fatal error has been raised.
                // Extract the actual error from the client
                // instance and return a new Error with
                // fatal set to true.
                cFatalErrstrSize := C.size_t(512)
                cFatalErrstr := (*C.char)(C.malloc(cFatalErrstrSize))
                defer C.free(unsafe.Pointer(cFatalErrstr))
                cFatalErr := C.rd_kafka_fatal_error(h.rk, cFatalErrstr, cFatalErrstrSize)
                fatalErr := newErrorFromCString(cFatalErr, cFatalErrstr)
                fatalErr.fatal = true
                retval = fatalErr

            } else {
                retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
            }

        case C.RD_KAFKA_EVENT_STATS:
            retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))}

        case C.RD_KAFKA_EVENT_DR:
            // Producer Delivery Report event
            // Each such event contains delivery reports for all
            // messages in the produced batch.
            // Forward delivery reports to per-message's response channel
            // or to the global Producer.Events channel, or none.
            rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev)))

            cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages))))

            for _, rkmessage := range rkmessages[:cnt] {
                msg := h.newMessageFromC(rkmessage)
                var ch *chan Event

                if rkmessage._private != nil {
                    // Find cgoif by id
                    cg, found := h.cgoGet((int)((uintptr)(rkmessage._private)))
                    if found {
                        cdr := cg.(cgoDr)

                        if cdr.deliveryChan != nil {
                            ch = &cdr.deliveryChan
                        }
                        msg.Opaque = cdr.opaque
                    }
                }

                if ch == nil && h.fwdDr {
                    ch = &channel
                }

                if ch != nil {
                    select {
                    case *ch <- msg:
                    case <-termChan:
                        retval = nil
                        term = true
                        break out
                    }

                } else {
                    retval = msg
                    break out
                }
            }

        case C.RD_KAFKA_EVENT_OFFSET_COMMIT:
            // Offsets committed
            cErr := C.rd_kafka_event_error(rkev)
            coffsets := C.rd_kafka_event_topic_partition_list(rkev)
            var offsets []TopicPartition
            if coffsets != nil {
                offsets = newTopicPartitionsFromCparts(coffsets)
            }

            if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
                retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets}
            } else {
                retval = OffsetsCommitted{nil, offsets}
            }

        case C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
            ev := OAuthBearerTokenRefresh{C.GoString(C.rd_kafka_event_config_string(rkev))}
            retval = ev

        case C.RD_KAFKA_EVENT_NONE:
            // poll timed out: no events available
            break out

        default:
            if rkev != nil {
                fmt.Fprintf(os.Stderr, "Ignored event %s\n",
                    C.GoString(C.rd_kafka_event_name(rkev)))
            }

        }

        if retval != nil {
            if channel != nil {
                select {
                case channel <- retval:
                case <-termChan:
                    retval = nil
                    term = true
                    break out
                }
            } else {
                break out
            }
        }
    }

    if prevRkev != nil {
        C.rd_kafka_event_destroy(prevRkev)
    }

    return retval, term
}
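eventPoll is what feeds the typed events above into the client's Events() channel; a minimal sketch of draining that channel with a type switch, where the broker address and topic are placeholders:

package main

import (
    "log"

    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
    p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
    if err != nil {
        log.Fatal(err)
    }
    defer p.Close()

    go func() {
        // Each value here was produced by eventPoll's evtype switch.
        for ev := range p.Events() {
            switch e := ev.(type) {
            case *kafka.Message:
                if e.TopicPartition.Error != nil {
                    log.Printf("delivery failed: %v", e.TopicPartition.Error)
                } else {
                    log.Printf("delivered to %v", e.TopicPartition)
                }
            case kafka.Error:
                log.Printf("client error: %v", e)
            default:
                log.Printf("ignored event: %s", ev)
            }
        }
    }()

    topic := "demo-topic"
    p.Produce(&kafka.Message{
        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
        Value:          []byte("hello"),
    }, nil)

    // Wait up to 5s for outstanding delivery reports.
    p.Flush(5000)
}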
340  vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go (generated, vendored)
@@ -1,340 +0,0 @@
package kafka

// Copyright 2016-2023 Confluent Inc.
// AUTOMATICALLY GENERATED ON 2023-10-25 15:32:05.267754826 +0200 CEST m=+0.000622161 USING librdkafka 2.3.0

/*
#include "select_rdkafka.h"
*/
import "C"

// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int

// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
    return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}

const (
    // ErrBadMsg Local: Bad message format
    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
    // ErrBadCompression Local: Invalid compressed data
    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
    // ErrDestroy Local: Broker handle destroyed
    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
    // ErrFail Local: Communication failure with broker
    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
    // ErrTransport Local: Broker transport failure
    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
    // ErrCritSysResource Local: Critical system resource failure
    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
    // ErrResolve Local: Host resolution failure
    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
    // ErrMsgTimedOut Local: Message timed out
    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
    // ErrPartitionEOF Broker: No more messages
    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
    // ErrUnknownPartition Local: Unknown partition
    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
    // ErrFs Local: File or filesystem error
    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
    // ErrUnknownTopic Local: Unknown topic
    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
    // ErrAllBrokersDown Local: All broker connections are down
    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
    // ErrInvalidArg Local: Invalid argument or configuration
    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
    // ErrTimedOut Local: Timed out
    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
    // ErrQueueFull Local: Queue full
    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
    // ErrIsrInsuff Local: ISR count insufficient
    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
    // ErrNodeUpdate Local: Broker node update
    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
    // ErrSsl Local: SSL error
    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
    // ErrWaitCoord Local: Waiting for coordinator
    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
    // ErrUnknownGroup Local: Unknown group
    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
    // ErrInProgress Local: Operation in progress
    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
    // ErrPrevInProgress Local: Previous operation in progress
    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
    // ErrExistingSubscription Local: Existing subscription
    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
    // ErrAssignPartitions Local: Assign partitions
    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
    // ErrRevokePartitions Local: Revoke partitions
    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
    // ErrConflict Local: Conflicting use
    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
    // ErrState Local: Erroneous state
    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
    // ErrUnknownProtocol Local: Unknown protocol
    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
    // ErrNotImplemented Local: Not implemented
    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
    // ErrAuthentication Local: Authentication failure
    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
    // ErrNoOffset Local: No offset stored
    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
    // ErrOutdated Local: Outdated
    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
    // ErrTimedOutQueue Local: Timed out in queue
    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
    // ErrUnsupportedFeature Local: Required feature not supported by broker
    ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
    // ErrWaitCache Local: Awaiting cache update
    ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
    // ErrIntr Local: Operation interrupted
    ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
    // ErrKeySerialization Local: Key serialization error
    ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
    // ErrValueSerialization Local: Value serialization error
    ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
    // ErrKeyDeserialization Local: Key deserialization error
    ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
    // ErrValueDeserialization Local: Value deserialization error
    ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
    // ErrPartial Local: Partial response
    ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
    // ErrReadOnly Local: Read-only object
    ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
    // ErrNoent Local: No such entry
    ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
    // ErrUnderflow Local: Read underflow
    ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
    // ErrInvalidType Local: Invalid type
    ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
    // ErrRetry Local: Retry operation
    ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
    // ErrPurgeQueue Local: Purged in queue
    ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
    // ErrPurgeInflight Local: Purged in flight
    ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
    // ErrFatal Local: Fatal error
    ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
    // ErrInconsistent Local: Inconsistent state
    ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
    // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
    ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
    // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
    ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
    // ErrUnknownBroker Local: Unknown broker
    ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
    // ErrNotConfigured Local: Functionality not configured
    ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
    // ErrFenced Local: This instance has been fenced by a newer instance
    ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
    // ErrApplication Local: Application generated error
    ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
    // ErrAssignmentLost Local: Group partition assignment lost
    ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
    // ErrNoop Local: No operation performed
    ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
    // ErrAutoOffsetReset Local: No offset to automatically reset to
    ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
    // ErrLogTruncation Local: Partition log truncation detected
    ErrLogTruncation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__LOG_TRUNCATION)
    // ErrUnknown Unknown broker error
    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
    // ErrNoError Success
    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
    // ErrOffsetOutOfRange Broker: Offset out of range
    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
    // ErrInvalidMsg Broker: Invalid message
    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
    // ErrUnknownTopicOrPart Broker: Unknown topic or partition
    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
    // ErrInvalidMsgSize Broker: Invalid message size
    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
    // ErrLeaderNotAvailable Broker: Leader not available
    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
    // ErrNotLeaderForPartition Broker: Not leader for partition
    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
    // ErrRequestTimedOut Broker: Request timed out
    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
    // ErrBrokerNotAvailable Broker: Broker not available
    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
    // ErrReplicaNotAvailable Broker: Replica not available
    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
    // ErrMsgSizeTooLarge Broker: Message size too large
    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
    // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
    // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
    // ErrNetworkException Broker: Broker disconnected before response received
    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
    // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
    ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
    // ErrCoordinatorNotAvailable Broker: Coordinator not available
    ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
    // ErrNotCoordinator Broker: Not coordinator
    ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
    // ErrTopicException Broker: Invalid topic
    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
    // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
    // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
    // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
    // ErrInvalidRequiredAcks Broker: Invalid required acks value
    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
    // ErrIllegalGeneration Broker: Specified group generation id is not valid
    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
    // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
    // ErrInvalidGroupID Broker: Invalid group.id
    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
    // ErrUnknownMemberID Broker: Unknown member
    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
    // ErrInvalidSessionTimeout Broker: Invalid session timeout
    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
    // ErrRebalanceInProgress Broker: Group rebalance in progress
    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
    // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
    // ErrTopicAuthorizationFailed Broker: Topic authorization failed
    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
    // ErrGroupAuthorizationFailed Broker: Group authorization failed
    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
    // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
    // ErrInvalidTimestamp Broker: Invalid timestamp
    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
    // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
    // ErrIllegalSaslState Broker: Request not valid in current SASL state
    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
    // ErrUnsupportedVersion Broker: API version not supported
    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
    // ErrTopicAlreadyExists Broker: Topic already exists
    ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
    // ErrInvalidPartitions Broker: Invalid number of partitions
    ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
    // ErrInvalidReplicationFactor Broker: Invalid replication factor
    ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
    // ErrInvalidReplicaAssignment Broker: Invalid replica assignment
    ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
    // ErrInvalidConfig Broker: Configuration is invalid
    ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
    // ErrNotController Broker: Not controller for cluster
    ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
    // ErrInvalidRequest Broker: Invalid request
    ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
    // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
    ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
    // ErrPolicyViolation Broker: Policy violation
    ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
    // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
    ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
    // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
    ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
    // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
    ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
    // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
    ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
    // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
    ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
    // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
    ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
    // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
    ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
    // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
    ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
    // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
    ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
    // ErrSecurityDisabled Broker: Security features are disabled
    ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
    // ErrOperationNotAttempted Broker: Operation not attempted
    ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
    // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
    ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
    // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
    ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
    // ErrSaslAuthenticationFailed Broker: SASL Authentication failed
    ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
    // ErrUnknownProducerID Broker: Unknown Producer Id
    ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
    // ErrReassignmentInProgress Broker: Partition reassignment is in progress
    ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
    // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
    ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
    // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
    ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
    // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
    ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
    // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
    ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
    // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
    ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
    // ErrDelegationTokenExpired Broker: Delegation Token is expired
    ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
    // ErrInvalidPrincipalType Broker: Supplied principalType is not supported
    ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
    // ErrNonEmptyGroup Broker: The group is not empty
    ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
    // ErrGroupIDNotFound Broker: The group id does not exist
    ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
    // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
    ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
    // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
    ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
    // ErrListenerNotFound Broker: No matching listener
    ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
    // ErrTopicDeletionDisabled Broker: Topic deletion is disabled
    ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
    // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
    ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
    // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
    ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
    // ErrUnsupportedCompressionType Broker: Unsupported compression type
    ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
    // ErrStaleBrokerEpoch Broker: Broker epoch has changed
    ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
    // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
    ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
    // ErrMemberIDRequired Broker: Group member needs a valid member ID
    ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
    // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
    ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
    // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
    ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
    // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
    ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
    // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
    ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
    // ErrElectionNotNeeded Broker: Leader election not needed for topic partition
    ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
    // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
    ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
    // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
    ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
    // ErrInvalidRecord Broker: Broker failed to validate record
    ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
    // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
    ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
    // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
    ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
    // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
    ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
    // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
    ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
    // ErrDuplicateResource Broker: Request illegally referred to the same resource twice
    ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
    // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
    ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
    // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters
|
||||
ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
|
||||
// ErrInvalidUpdateVersion Broker: Invalid update version
|
||||
ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
|
||||
// ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
|
||||
ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
|
||||
// ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
|
||||
ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
|
||||
)
|
||||
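The constants above are the broker-side error codes this client maps from librdkafka. A minimal sketch of how an application might branch on them, assuming the confluent-kafka-go v2 import path used throughout this diff; the classify helper and its messages are hypothetical:

package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// classify is a hypothetical helper showing how an application could
// branch on the broker error codes defined above via kafka.Error.Code().
func classify(err error) {
	kerr, ok := err.(kafka.Error)
	if !ok {
		fmt.Println("not a kafka error:", err)
		return
	}
	switch kerr.Code() {
	case kafka.ErrSaslAuthenticationFailed:
		fmt.Println("broker rejected SASL credentials:", kerr)
	case kafka.ErrGroupMaxSizeReached:
		fmt.Println("consumer group is full:", kerr)
	default:
		fmt.Println("other client/broker error:", kerr)
	}
}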
48 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/glue_rdkafka.h generated vendored
@@ -1,48 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once


/**
 * Glue between Go, Cgo and librdkafka
 */


/**
 * Temporary C to Go header representation
 */
typedef struct tmphdr_s {
	const char *key;
	const void *val;  // producer: malloc()ed by Go code if size > 0
	                  // consumer: owned by librdkafka
	ssize_t size;
} tmphdr_t;


/**
 * @struct This is a glue struct used by the C code in this client to
 *         effectively map fields from a librdkafka rd_kafka_message_t
 *         to something usable in Go with as few CGo calls as possible.
 */
typedef struct glue_msg_s {
	rd_kafka_message_t *msg;
	rd_kafka_timestamp_type_t tstype;
	int64_t ts;
	tmphdr_t *tmphdrs;
	size_t tmphdrsCnt;
	int8_t want_hdrs;  /**< If true, copy headers */
} glue_msg_t;
385 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go generated vendored
@@ -1,385 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
	"strings"
	"sync"
	"time"
	"unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// OAuthBearerToken represents the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication.
type OAuthBearerToken struct {
	// Token value, often (but not necessarily) a JWS compact serialization
	// as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
	// the regular expression for a SASL/OAUTHBEARER value defined at
	// https://tools.ietf.org/html/rfc7628#section-3.1
	TokenValue string
	// Metadata about the token indicating when it expires (local time);
	// it must represent a time in the future
	Expiration time.Time
	// Metadata about the token indicating the Kafka principal name
	// to which it applies (for example, "admin")
	Principal string
	// SASL extensions, if any, to be communicated to the broker during
	// authentication (all keys and values of which must meet the regular
	// expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
	// and it must not contain the reserved "auth" key)
	Extensions map[string]string
}

// Handle represents a generic client handle containing common parts for
// both Producer and Consumer.
type Handle interface {
	// SetOAuthBearerToken sets the data to be transmitted
	// to a broker during SASL/OAUTHBEARER authentication. It will return nil
	// on success, otherwise an error if:
	// 1) the token data is invalid (meaning an expiration time in the past
	// or either a token value or an extension key or value that does not meet
	// the regular expression requirements as per
	// https://tools.ietf.org/html/rfc7628#section-3.1);
	// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

	// SetOAuthBearerTokenFailure sets the error message describing why token
	// retrieval/setting failed; it also schedules a new token refresh event for 10
	// seconds later so the attempt may be retried. It will return nil on
	// success, otherwise an error if:
	// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerTokenFailure(errstr string) error

	// gethandle() returns the internal handle struct pointer
	gethandle() *handle

	// verifyClient() returns the validity of the client
	verifyClient() error

	// IsClosed() returns true if the client is closed
	IsClosed() bool
}

// Common instance handle for both Producer and Consumer
type handle struct {
	rk  *C.rd_kafka_t
	rkq *C.rd_kafka_queue_t

	// Forward logs from librdkafka log queue to logs channel.
	logs          chan LogEvent
	logq          *C.rd_kafka_queue_t
	closeLogsChan bool

	// Topic <-> rkt caches
	rktCacheLock sync.Mutex
	// topic name -> rkt cache
	rktCache map[string]*C.rd_kafka_topic_t
	// rkt -> topic name cache
	rktNameCache map[*C.rd_kafka_topic_t]string

	// Cached instance name to avoid CGo call in String()
	name string

	//
	// cgo map
	// Maps C callbacks based on cgoid back to its Go object
	cgoLock   sync.Mutex
	cgoidNext uintptr
	cgomap    map[int]cgoif

	//
	// producer
	//
	p *Producer

	// Forward delivery reports on Producer.Events channel
	fwdDr bool

	// Enabled message fields for delivery reports and consumed messages.
	msgFields *messageFields

	//
	// consumer
	//
	c *Consumer

	// WaitGroup to wait for spawned go-routines to finish.
	waitGroup sync.WaitGroup
}

func (h *handle) String() string {
	return h.name
}

func (h *handle) setup() {
	h.rktCache = make(map[string]*C.rd_kafka_topic_t)
	h.rktNameCache = make(map[*C.rd_kafka_topic_t]string)
	h.cgomap = make(map[int]cgoif)
	h.name = C.GoString(C.rd_kafka_name(h.rk))
	if h.msgFields == nil {
		h.msgFields = newMessageFields()
	}
}

func (h *handle) cleanup() {
	if h.logs != nil {
		C.rd_kafka_queue_destroy(h.logq)
		if h.closeLogsChan {
			close(h.logs)
		}
	}

	for _, crkt := range h.rktCache {
		C.rd_kafka_topic_destroy(crkt)
	}

	if h.rkq != nil {
		C.rd_kafka_queue_destroy(h.rkq)
	}
}

func (h *handle) setupLogQueue(logsChan chan LogEvent, termChan chan bool) {
	if logsChan == nil {
		logsChan = make(chan LogEvent, 10000)
		h.closeLogsChan = true
	}

	h.logs = logsChan

	// Let librdkafka forward logs to our log queue instead of the main queue
	h.logq = C.rd_kafka_queue_new(h.rk)
	C.rd_kafka_set_log_queue(h.rk, h.logq)

	// Start a polling goroutine to consume the log queue
	h.waitGroup.Add(1)
	go func() {
		h.pollLogEvents(h.logs, 100, termChan)
		h.waitGroup.Done()
	}()
}

// getRkt0 finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) {
	if doLock {
		h.rktCacheLock.Lock()
		defer h.rktCacheLock.Unlock()
	}
	crkt, ok := h.rktCache[topic]
	if ok {
		return crkt
	}

	if ctopic == nil {
		ctopic = C.CString(topic)
		defer C.free(unsafe.Pointer(ctopic))
	}

	crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil)
	if crkt == nil {
		panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s",
			topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error()))))
	}

	h.rktCache[topic] = crkt
	h.rktNameCache[crkt] = topic

	return crkt
}

// getRkt finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) {
	return h.getRkt0(topic, nil, true)
}

// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably
// using the local cache to avoid a cgo call.
func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) {
	h.rktCacheLock.Lock()
	defer h.rktCacheLock.Unlock()

	topic, ok := h.rktNameCache[crkt]
	if ok {
		return topic
	}

	// we need our own copy/refcount of the crkt
	ctopic := C.rd_kafka_topic_name(crkt)
	topic = C.GoString(ctopic)

	crkt = h.getRkt0(topic, ctopic, false /* don't lock */)

	return topic
}

// cgoif is a generic interface for holding Go state passed as opaque
// value to the C code.
// Since pointers to complex Go types cannot be passed to C we instead create
// a cgoif object, generate a unique id that is added to the cgomap,
// and then pass that id to the C code. When the C code callback is called we
// use the id to look up the cgoif object in the cgomap.
type cgoif interface{}

// delivery report cgoif container
type cgoDr struct {
	deliveryChan chan Event
	opaque       interface{}
}

// cgoPut adds object cg to the handle's cgo map and returns a
// unique id for the added entry.
// Thread-safe.
// FIXME: the uniqueness of the id is questionable over time.
func (h *handle) cgoPut(cg cgoif) (cgoid int) {
	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()

	h.cgoidNext++
	if h.cgoidNext == 0 {
		h.cgoidNext++
	}
	cgoid = (int)(h.cgoidNext)
	h.cgomap[cgoid] = cg
	return cgoid
}

// cgoGet looks up cgoid in the cgo map, deletes the reference from the map
// and returns the object, if found. Else returns nil, false.
// Thread-safe.
func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) {
	if cgoid == 0 {
		return nil, false
	}

	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()
	cg, found = h.cgomap[cgoid]
	if found {
		delete(h.cgomap, cgoid)
	}

	return cg, found
}

// setOAuthBearerToken - see rd_kafka_oauthbearer_set_token()
func (h *handle) setOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	cTokenValue := C.CString(oauthBearerToken.TokenValue)
	defer C.free(unsafe.Pointer(cTokenValue))

	cPrincipal := C.CString(oauthBearerToken.Principal)
	defer C.free(unsafe.Pointer(cPrincipal))

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cExtensions := make([]*C.char, 2*len(oauthBearerToken.Extensions))
	extensionSize := 0
	for key, value := range oauthBearerToken.Extensions {
		cExtensions[extensionSize] = C.CString(key)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
		cExtensions[extensionSize] = C.CString(value)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
	}

	var cExtensionsToUse **C.char
	if extensionSize > 0 {
		cExtensionsToUse = (**C.char)(unsafe.Pointer(&cExtensions[0]))
	}

	cErr := C.rd_kafka_oauthbearer_set_token(h.rk, cTokenValue,
		C.int64_t(oauthBearerToken.Expiration.UnixNano()/(1000*1000)), cPrincipal,
		cExtensionsToUse, C.size_t(extensionSize), cErrstr, cErrstrSize)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newErrorFromCString(cErr, cErrstr)
}

// setOAuthBearerTokenFailure - see rd_kafka_oauthbearer_set_token_failure()
func (h *handle) setOAuthBearerTokenFailure(errstr string) error {
	cerrstr := C.CString(errstr)
	defer C.free(unsafe.Pointer(cerrstr))
	cErr := C.rd_kafka_oauthbearer_set_token_failure(h.rk, cerrstr)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newError(cErr)
}

// messageFields controls which fields are made available for producer delivery reports & consumed messages.
// true values indicate that the field should be included
type messageFields struct {
	Key     bool
	Value   bool
	Headers bool
}

// disableAll disables all fields
func (mf *messageFields) disableAll() {
	mf.Key = false
	mf.Value = false
	mf.Headers = false
}

// newMessageFields returns a new messageFields with all fields enabled
func newMessageFields() *messageFields {
	return &messageFields{
		Key:     true,
		Value:   true,
		Headers: true,
	}
}

// newMessageFieldsFrom constructs a new messageFields from the given configuration value
func newMessageFieldsFrom(v ConfigValue) (*messageFields, error) {
	msgFields := newMessageFields()
	switch v {
	case "all":
		// nothing to do
	case "", "none":
		msgFields.disableAll()
	default:
		msgFields.disableAll()
		for _, value := range strings.Split(v.(string), ",") {
			switch value {
			case "key":
				msgFields.Key = true
			case "value":
				msgFields.Value = true
			case "headers":
				msgFields.Headers = true
			default:
				return nil, fmt.Errorf("unknown message field: %s", value)
			}
		}
	}
	return msgFields, nil
}
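The SetOAuthBearerToken/SetOAuthBearerTokenFailure pair above is the whole refresh contract: on each OAuthBearerTokenRefresh event the application either installs a fresh token or reports why it could not, which schedules a retry. A minimal sketch of that contract; fetchToken and its return values are hypothetical stand-ins for a real identity-provider call:

package main

import (
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// fetchToken is a hypothetical helper; a real application would call an
// identity provider here. The static values are placeholders.
func fetchToken() (string, time.Time, error) {
	return "eyJhbGciOi...", time.Now().Add(10 * time.Minute), nil
}

// handleTokenRefresh would be called when an OAuthBearerTokenRefresh
// event is seen on the client's event stream.
func handleTokenRefresh(p *kafka.Producer) {
	value, expiry, err := fetchToken()
	if err != nil {
		// Schedules another refresh event ~10 seconds later, per the docs above.
		p.SetOAuthBearerTokenFailure(err.Error())
		return
	}
	token := kafka.OAuthBearerToken{
		TokenValue: value,
		Expiration: expiry, // must be in the future
		Principal:  "admin",
	}
	if err := p.SetOAuthBearerToken(token); err != nil {
		p.SetOAuthBearerTokenFailure(err.Error())
	}
}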
67 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/header.go generated vendored
@@ -1,67 +0,0 @@
/**
 * Copyright 2018 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
	"strconv"
)

/*
#include <string.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"

// Header represents a single Kafka message header.
//
// Message headers are made up of a list of Header elements, retaining their original insert
// order and allowing for duplicate Keys.
//
// Key is a human readable string identifying the header.
// Value is the key's binary value; Kafka does not put any restrictions on the format of
// the Value but it should be made relatively compact.
// The value may be a byte array, empty, or nil.
//
// NOTE: Message headers are not available on producer delivery report messages.
type Header struct {
	Key   string // Header name (utf-8 string)
	Value []byte // Header value (nil, empty, or binary)
}

// String returns the Header Key and data in a human representable possibly truncated form
// suitable for displaying to the user.
func (h Header) String() string {
	if h.Value == nil {
		return fmt.Sprintf("%s=nil", h.Key)
	}

	valueLen := len(h.Value)
	if valueLen == 0 {
		return fmt.Sprintf("%s=<empty>", h.Key)
	}

	truncSize := valueLen
	trunc := ""
	if valueLen > 50+15 {
		truncSize = 50
		trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize)
	}

	return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc)
}
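Since Header values are free-form bytes that keep insertion order and may repeat keys, attaching them at produce time is just a slice on the message. A minimal sketch, assuming the v2 import path; the topic name and header contents are made up:

package main

import "github.com/confluentinc/confluent-kafka-go/v2/kafka"

// produceWithHeaders shows headers attached to an outgoing message;
// duplicate keys and nil values are both permitted.
func produceWithHeaders(p *kafka.Producer, topic string) error {
	msg := &kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("payload"),
		Headers: []kafka.Header{
			{Key: "trace-id", Value: []byte("abc-123")},
			{Key: "note", Value: nil}, // nil values are allowed
		},
	}
	// nil delivery channel: the report arrives on p.Events().
	return p.Produce(msg, nil)
}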
483 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go generated vendored
@@ -1,483 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package kafka provides high-level Apache Kafka producers and consumers
// using bindings on top of the librdkafka C library.
//
// # High-level Consumer
//
// * Decide if you want to read messages and events by calling `.Poll()` or
// the deprecated option of using the `.Events()` channel. (If you want to use
// the `.Events()` channel then set `"go.events.channel.enable": true`).
//
// * Create a Consumer with `kafka.NewConsumer()` providing at
// least the `bootstrap.servers` and `group.id` configuration properties.
//
// * Call `.Subscribe()` (or `.SubscribeTopics()` to subscribe to multiple topics)
// to join the group with the specified subscription set.
// Subscriptions are atomic; calling `.Subscribe*()` again will leave
// the group and rejoin with the new set of topics.
//
// * Start reading events and messages from either the `.Events` channel
// or by calling `.Poll()`.
//
// * When the group has rebalanced each client member is assigned a
// (sub-)set of topic+partitions.
// By default the consumer will start fetching messages for its assigned
// partitions at this point, but your application may enable rebalance
// events to get an insight into what the assigned partitions were
// as well as set the initial offsets. To do this you need to pass
// `"go.application.rebalance.enable": true` to the `NewConsumer()` call
// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
// with the assigned partition set. You can optionally modify the initial
// offsets (they'll default to stored offsets and if there are no previously stored
// offsets it will fall back to `"auto.offset.reset"`
// which defaults to the `latest` message) and then call `.Assign(partitions)`
// to start consuming. If you don't need to modify the initial offsets you will
// not need to call `.Assign()`; the client will do so automatically for you,
// unless you are using the channel-based consumer in which case
// you MUST call `.Assign()` when receiving the `AssignedPartitions` and
// `RevokedPartitions` events.
//
// * As messages are fetched they will be made available on either the
// `.Events` channel or by calling `.Poll()`; look for event type `*kafka.Message`.
//
// * Handle messages, events and errors to your liking.
//
// * When you are done consuming call `.Close()` to commit final offsets
// and leave the consumer group.
//
// # Producer
//
// * Create a Producer with `kafka.NewProducer()` providing at least
// the `bootstrap.servers` configuration property.
//
// * Messages may now be produced either by sending a `*kafka.Message`
// on the `.ProduceChannel` or by calling `.Produce()`.
//
// * Producing is an asynchronous operation so the client notifies the application
// of per-message produce success or failure through something called delivery reports.
// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
// was successfully delivered or not.
// It is also possible to direct delivery reports to alternate channels
// by providing a non-nil `chan Event` channel to `.Produce()`.
// If no delivery reports are wanted they can be completely disabled by
// setting configuration property `"go.delivery.reports": false`.
//
// * When you are done producing messages you will need to make sure all messages
// are indeed delivered to the broker (or failed); remember that this is
// an asynchronous client so some of your messages may be lingering in internal
// channels or transmission queues.
// To do this you can either keep track of the messages you've produced
// and wait for their corresponding delivery reports, or call the convenience
// function `.Flush()` that will block until all message deliveries are done
// or the provided timeout elapses.
//
// * Finally call `.Close()` to decommission the producer.
//
// # Transactional producer API
//
// The transactional producer operates on top of the idempotent producer,
// and provides full exactly-once semantics (EOS) for Apache Kafka when used
// with the transaction aware consumer (`isolation.level=read_committed`).
//
// A producer instance is configured for transactions by setting the
// `transactional.id` to an identifier unique for the application. This
// id will be used to fence stale transactions from previous instances of
// the application, typically following an outage or crash.
//
// After creating the transactional producer instance using `NewProducer()`
// the transactional state must be initialized by calling
// `InitTransactions()`. This is a blocking call that will
// acquire a runtime producer id from the transaction coordinator broker
// as well as abort any stale transactions and fence any still running producer
// instances with the same `transactional.id`.
//
// Once transactions are initialized the application may begin a new
// transaction by calling `BeginTransaction()`.
// A producer instance may only have one single on-going transaction.
//
// Any messages produced after the transaction has been started will
// belong to the ongoing transaction and will be committed or aborted
// atomically.
// It is not permitted to produce messages outside a transaction
// boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`,
// `AbortTransaction()` or if the current transaction has failed.
//
// If consumed messages are used as input to the transaction, the consumer
// instance must be configured with `enable.auto.commit` set to `false`.
// To commit the consumed offsets along with the transaction pass the
// list of consumed partitions and the last offset processed + 1 to
// `SendOffsetsToTransaction()` prior to committing the transaction.
// This allows an aborted transaction to be restarted using the previously
// committed offsets.
//
// To commit the produced messages, and any consumed offsets, to the
// current transaction, call `CommitTransaction()`.
// This call will block until the transaction has been fully committed or
// failed (typically due to fencing by a newer producer instance).
//
// Alternatively, if processing fails, or an abortable transaction error is
// raised, the transaction needs to be aborted by calling
// `AbortTransaction()` which marks any produced messages and
// offset commits as aborted.
//
// After the current transaction has been committed or aborted a new
// transaction may be started by calling `BeginTransaction()` again.
//
// Retriable errors:
// Some error cases allow the attempted operation to be retried; this is
// indicated by the error object having the retriable flag set, which can
// be detected by calling `err.(kafka.Error).IsRetriable()`.
// When this flag is set the application may retry the operation immediately
// or preferably after a shorter grace period (to avoid busy-looping).
// Retriable errors include timeouts, broker transport failures, etc.
//
// Abortable errors:
// An ongoing transaction may fail permanently due to various errors,
// such as the transaction coordinator becoming unavailable, write failures to the
// Apache Kafka log, under-replicated partitions, etc.
// At this point the producer application must abort the current transaction
// using `AbortTransaction()` and optionally start a new transaction
// by calling `BeginTransaction()`.
// Whether an error is abortable or not is detected by calling
// `err.(kafka.Error).TxnRequiresAbort()` on the returned error object.
//
// Fatal errors:
// While the underlying idempotent producer will typically only raise
// fatal errors for unrecoverable cluster errors where the idempotency
// guarantees can't be maintained, most of these are treated as abortable by
// the transactional producer since transactions may be aborted and retried
// in their entirety;
// the transactional producer on the other hand introduces a set of additional
// fatal errors which the application needs to handle by shutting down the
// producer and terminating. There is no way for a producer instance to recover
// from fatal errors.
// Whether an error is fatal or not is detected by calling
// `err.(kafka.Error).IsFatal()` on the returned error object or by checking
// the global `GetFatalError()`.
//
// Handling of other errors:
// For errors that have neither the retriable, abortable nor fatal flag set
// it is not always obvious how to handle them. While some of these errors
// may be indicative of bugs in the application code, such as when
// an invalid parameter is passed to a method, other errors might originate
// from the broker and be passed through as-is to the application.
// The general recommendation is to treat these errors, which have
// neither the retriable nor abortable flag set, as fatal.
//
// Error handling example:
//
// retry:
//
//	err := producer.CommitTransaction(...)
//	if err == nil {
//	    return nil
//	} else if err.(kafka.Error).TxnRequiresAbort() {
//	    do_abort_transaction_and_reset_inputs()
//	} else if err.(kafka.Error).IsRetriable() {
//	    goto retry
//	} else { // treat all other errors as fatal errors
//	    panic(err)
//	}
//
// # Events
//
// Apart from emitting messages and delivery reports the client also communicates
// with the application through a number of different event types.
// An application may choose to handle or ignore these events.
//
// # Consumer events
//
// * `*kafka.Message` - a fetched message.
//
// * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
// Requires `go.application.rebalance.enable`
//
// * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance.
// `AssignedPartitions` and `RevokedPartitions` are symmetrical.
// Requires `go.application.rebalance.enable`
//
// * `PartitionEOF` - Consumer has reached the end of a partition.
// NOTE: The consumer will keep trying to fetch new messages for the partition.
//
// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
//
// # Producer events
//
// * `*kafka.Message` - delivery report for produced message.
// Check `.TopicPartition.Error` for delivery result.
//
// # Generic events for both Consumer and Producer
//
// * `KafkaError` - client (error codes are prefixed with _) or broker error.
// These errors are normally just informational since the
// client will try its best to automatically recover (eventually).
//
// * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required.
// This event only occurs with sasl.mechanism=OAUTHBEARER.
// Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient
// instance when a successful token retrieval is completed, otherwise be sure to
// invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or
// if setting the token failed, which could happen if an extension doesn't meet
// the required regular expression); invoking SetOAuthBearerTokenFailure() will
// schedule a new event for 10 seconds later so another retrieval can be attempted.
//
// Hint: If your application registers a signal notification
// (signal.Notify), make sure the signals channel is buffered to avoid
// possible complications with blocking Poll() calls.
//
// Note: The Confluent Kafka Go client is safe for concurrent use.
package kafka

import (
	"fmt"
	"unsafe"

	// Make sure librdkafka_vendor/ sub-directory is included in vendor pulls.
	_ "github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor"
)

/*
#include <stdlib.h>
#include <string.h>
#include "select_rdkafka.h"

static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
   return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
}

static const rd_kafka_group_result_t *
group_result_by_idx (const rd_kafka_group_result_t **groups, size_t cnt, size_t idx) {
    if (idx >= cnt)
        return NULL;
    return groups[idx];
}
*/
import "C"

// PartitionAny represents any partition (for partitioning),
// or unspecified value (for all other cases)
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)

// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.
type TopicPartition struct {
	Topic       *string
	Partition   int32
	Offset      Offset
	Metadata    *string
	Error       error
	LeaderEpoch *int32 // LeaderEpoch or nil if not available
}

func (p TopicPartition) String() string {
	topic := "<null>"
	if p.Topic != nil {
		topic = *p.Topic
	}
	if p.Error != nil {
		return fmt.Sprintf("%s[%d]@%s(%s)",
			topic, p.Partition, p.Offset, p.Error)
	}
	return fmt.Sprintf("%s[%d]@%s",
		topic, p.Partition, p.Offset)
}

// TopicPartitions is a slice of TopicPartitions that also implements
// the sort interface
type TopicPartitions []TopicPartition

func (tps TopicPartitions) Len() int {
	return len(tps)
}

func (tps TopicPartitions) Less(i, j int) bool {
	if *tps[i].Topic < *tps[j].Topic {
		return true
	} else if *tps[i].Topic > *tps[j].Topic {
		return false
	}
	return tps[i].Partition < tps[j].Partition
}

func (tps TopicPartitions) Swap(i, j int) {
	tps[i], tps[j] = tps[j], tps[i]
}

// Node represents a Kafka broker.
type Node struct {
	// Node id.
	ID int
	// Node host.
	Host string
	// Node port.
	Port int
	// Node rack (may be nil)
	Rack *string
}

func (n Node) String() string {
	return fmt.Sprintf("[%s:%d]/%d", n.Host, n.Port, n.ID)
}

// UUID Kafka UUID representation
type UUID struct {
	// Most Significant Bits.
	mostSignificantBits int64
	// Least Significant Bits.
	leastSignificantBits int64
	// Base64 representation
	base64str string
}

// String returns the base64 string representation of the UUID
func (uuid UUID) String() string {
	return uuid.base64str
}

// GetMostSignificantBits returns the most significant 64 bits of the 128-bit UUID
func (uuid UUID) GetMostSignificantBits() int64 {
	return uuid.mostSignificantBits
}

// GetLeastSignificantBits returns the least significant 64 bits of the 128-bit UUID
func (uuid UUID) GetLeastSignificantBits() int64 {
	return uuid.leastSignificantBits
}

// ConsumerGroupTopicPartitions represents a consumer group's TopicPartitions.
type ConsumerGroupTopicPartitions struct {
	// Group name
	Group string
	// Partitions list
	Partitions []TopicPartition
}

func (gtp ConsumerGroupTopicPartitions) String() string {
	res := gtp.Group
	res += "[ "
	for _, tp := range gtp.Partitions {
		res += tp.String() + " "
	}
	res += "]"
	return res
}

// newCPartsFromTopicPartitions creates a new C rd_kafka_topic_partition_list_t
// from a TopicPartition array.
func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) {
	cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions)))
	for _, part := range partitions {
		ctopic := C.CString(*part.Topic)
		defer C.free(unsafe.Pointer(ctopic))
		rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition))
		rktpar.offset = C.int64_t(part.Offset)

		if part.Metadata != nil {
			cmetadata := C.CString(*part.Metadata)
			rktpar.metadata = unsafe.Pointer(cmetadata)
			rktpar.metadata_size = C.size_t(len(*part.Metadata))
		}

		if part.LeaderEpoch != nil {
			cLeaderEpoch := C.int32_t(*part.LeaderEpoch)
			C.rd_kafka_topic_partition_set_leader_epoch(rktpar, cLeaderEpoch)
		}
	}

	return cparts
}

func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) {

	topic := C.GoString(crktpar.topic)
	partition.Topic = &topic
	partition.Partition = int32(crktpar.partition)
	partition.Offset = Offset(crktpar.offset)
	if crktpar.metadata_size > 0 {
		size := C.int(crktpar.metadata_size)
		cstr := (*C.char)(unsafe.Pointer(crktpar.metadata))
		metadata := C.GoStringN(cstr, size)
		partition.Metadata = &metadata
	}
	if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		partition.Error = newError(crktpar.err)
	}

	cLeaderEpoch := int32(C.rd_kafka_topic_partition_get_leader_epoch(crktpar))
	if cLeaderEpoch >= 0 {
		partition.LeaderEpoch = &cLeaderEpoch
	}
}

func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) {

	partcnt := int(cparts.cnt)

	partitions = make([]TopicPartition, partcnt)
	for i := 0; i < partcnt; i++ {
		crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i))
		setupTopicPartitionFromCrktpar(&partitions[i], crktpar)
	}

	return partitions
}

// cToConsumerGroupTopicPartitions converts a C rd_kafka_group_result_t array to a
// ConsumerGroupTopicPartitions slice.
func (a *AdminClient) cToConsumerGroupTopicPartitions(
	cGroupResults **C.rd_kafka_group_result_t,
	cGroupCount C.size_t) (result []ConsumerGroupTopicPartitions) {
	result = make([]ConsumerGroupTopicPartitions, uint(cGroupCount))

	for i := uint(0); i < uint(cGroupCount); i++ {
		cGroupResult := C.group_result_by_idx(cGroupResults, cGroupCount, C.size_t(i))
		cGroupPartitions := C.rd_kafka_group_result_partitions(cGroupResult)
		result[i] = ConsumerGroupTopicPartitions{
			Group:      C.GoString(C.rd_kafka_group_result_name(cGroupResult)),
			Partitions: newTopicPartitionsFromCparts(cGroupPartitions),
		}
	}
	return
}

// LibraryVersion returns the underlying librdkafka library version as a
// (version_int, version_str) tuple.
func LibraryVersion() (int, string) {
	ver := (int)(C.rd_kafka_version())
	verstr := C.GoString(C.rd_kafka_version_str())
	return ver, verstr
}

// setSaslCredentials sets the SASL credentials used for the specified Kafka client.
// The new credentials will overwrite the old ones (which were set when creating the
// client or by a previous call to setSaslCredentials). The new credentials will be
// used the next time the client needs to establish a connection to the broker. This
// function will *not* break existing broker connections that were established with the
// old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms.
func setSaslCredentials(rk *C.rd_kafka_t, username, password string) error {
	cUsername := C.CString(username)
	defer C.free(unsafe.Pointer(cUsername))
	cPassword := C.CString(password)
	defer C.free(unsafe.Pointer(cPassword))

	if err := C.rd_kafka_sasl_set_credentials(rk, cUsername, cPassword); err != nil {
		return newErrorFromCErrorDestroy(err)
	}

	return nil
}
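The High-level Consumer walkthrough in the package documentation above boils down to a short poll loop. A minimal sketch of that flow; the broker address, topic and group names are made-up placeholders:

package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092", // placeholder address
		"group.id":          "example-group",
		"auto.offset.reset": "earliest",
	})
	if err != nil {
		panic(err)
	}
	defer c.Close() // commits final offsets and leaves the group

	if err := c.SubscribeTopics([]string{"example-topic"}, nil); err != nil {
		panic(err)
	}

	for {
		switch ev := c.Poll(1000).(type) {
		case *kafka.Message:
			fmt.Printf("message on %s: %s\n", ev.TopicPartition, ev.Value)
		case kafka.Error:
			// Usually informational; the client recovers on its own.
			fmt.Println("error:", ev)
		case nil:
			// Poll timed out; loop again.
		}
	}
}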
@@ -1,3 +0,0 @@
*.tar.gz
*.tgz
tmp*
393 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt generated vendored
@@ -1,393 +0,0 @@
|
||||
LICENSE
|
||||
--------------------------------------------------------------
|
||||
librdkafka - Apache Kafka C driver library
|
||||
|
||||
Copyright (c) 2012-2022, Magnus Edenhill
|
||||
2023, Confluent Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
LICENSE.cjson
|
||||
--------------------------------------------------------------
|
||||
For cJSON.c and cJSON.h:
|
||||
|
||||
Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
|
||||
|
||||
LICENSE.crc32c
|
||||
--------------------------------------------------------------
|
||||
# For src/crc32c.c copied (with modifications) from
|
||||
# http://stackoverflow.com/a/17646775/1821055
|
||||
|
||||
/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
|
||||
* Copyright (C) 2013 Mark Adler
|
||||
* Version 1.1 1 Aug 2013 Mark Adler
|
||||
*/
|
||||
|
||||
/*
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the author be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Mark Adler
|
||||
madler@alumni.caltech.edu
|
||||
*/
|
||||
|
||||
|
||||
LICENSE.fnv1a
|
||||
--------------------------------------------------------------
|
||||
parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c
|
||||
|
||||
|
||||
Please do not copyright this code. This code is in the public domain.
|
||||
|
||||
LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
|
||||
EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
|
||||
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
By:
|
||||
chongo <Landon Curt Noll> /\oo/\
|
||||
http://www.isthe.com/chongo/
|
||||
|
||||
Share and Enjoy! :-)
|
||||
|
||||
|
||||
LICENSE.hdrhistogram
|
||||
--------------------------------------------------------------
|
||||
This license covers src/rdhdrhistogram.c which is a C port of
|
||||
Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
|
||||
at revision 3a0bb77429bd3a61596f5e8a3172445844342120
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Coda Hale
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE
|
||||
|
||||
|
||||
LICENSE.lz4
|
||||
--------------------------------------------------------------
|
||||
src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
|
||||
|
||||
LZ4 Library
|
||||
Copyright (c) 2011-2016, Yann Collet
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
LICENSE.murmur2
|
||||
--------------------------------------------------------------
|
||||
parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
|
||||
|
||||
|
||||
MurMurHash2 Library
|
||||
//-----------------------------------------------------------------------------
|
||||
// MurmurHash2 was written by Austin Appleby, and is placed in the public
|
||||
// domain. The author hereby disclaims copyright to this source code.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
LICENSE.pycrc
|
||||
--------------------------------------------------------------
|
||||
The following license applies to the files rdcrc32.c and rdcrc32.h which
|
||||
have been generated by the pycrc tool.
|
||||
============================================================================
|
||||
|
||||
Copyright (c) 2006-2012, Thomas Pircher <tehpeh@gmx.net>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
|
||||
LICENSE.queue
--------------------------------------------------------------
For sys/queue.h:

 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$

LICENSE.regexp
--------------------------------------------------------------
regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684

"
These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
"

LICENSE.snappy
--------------------------------------------------------------
######################################################################
# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h  #
# originally retrieved from http://github.com/andikleen/snappy-c    #
# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219             #
######################################################################

The snappy-c code is under the same license as the original snappy source

Copyright 2011 Intel Corporation All Rights Reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

LICENSE.tinycthread
--------------------------------------------------------------
From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9

License
-------

Copyright (c) 2012 Marcus Geelnard
              2013-2014 Evan Nemerson

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.

2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.

3. This notice may not be removed or altered from any source
   distribution.

LICENSE.wingetopt
--------------------------------------------------------------
For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Dieter Baron and Thomas Klausner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

25 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md generated vendored
@@ -1,25 +0,0 @@
# Bundling prebuilt librdkafka

confluent-kafka-go bundles prebuilt statically linked
versions of librdkafka for the following platforms:

 * MacOSX x64, arm64 (aka Darwin)
 * Linux glibc x64, arm64 (Ubuntu, CentOS, etc)
 * Linux musl x64, arm64 (Alpine)
 * Windows x64

## Import static librdkafka bundle

First create the static librdkafka bundle following the instructions in
librdkafka's packaging/nuget/README.md.

Then import the new version by using the import.sh script in this directory.
The script will create a branch, import the bundle, create a commit and push
the branch to Github for PR review. The PR must be opened manually, reviewed
and then finally merged (make sure to merge it, DO NOT squash or rebase).

    $ ./import.sh ~/path/to/librdkafka-static-bundle-v1.4.0.tgz

This will copy the static library and the rdkafka.h header file
to this directory, as well as generate a new ../build_..go file
for this platform + variant.
@@ -1,121 +0,0 @@
#!/bin/bash
#
# Updates the bundled prebuilt librdkafka libraries to specified version.
#

set -e


usage() {
    echo "Usage: $0 librdkafka-static-bundle-<VERSION>.tgz"
    echo ""
    echo "This tool must be run from the TOPDIR/kafka/librdkafka_vendor directory"
    exit 1
}


# Parse dynamic libraries from linker command line.
# Will print a list matching -lfoo and -framework X..
parse_dynlibs() {
    local libs=
    while [[ $# -gt 0 ]]; do
        if [[ $1 == -l* ]]; then
            libs="${libs} $1"
        elif [[ $1 == -framework ]]; then
            libs="${libs} $1 $2"
            shift # remove one (extra) arg
        fi
        shift # remove one arg
    done

    echo "$libs"
}

# Parse dynamic library dependencies from pkg-config file and print
# them to stdout.
parse_pc_dynlibs() {
    local pc=$1
    parse_dynlibs $(sed -n 's/^Libs: \(..*\)/\1/p' "$pc")
}

setup_build() {
    # Copies static library from the temp directory into final location,
    # extracts dynamic lib list from the pkg-config file,
    # and generates the build_..go file
    local btype=$1
    local apath=$2
    local pc=$3
    local srcinfo=$4
    local build_tag=
    local gpath="../build_${btype}.go"
    local dpath="librdkafka_${btype}.a"

    if [[ $btype =~ ^glibc_linux.*$ ]]; then
        build_tag="// +build !musl"
    elif [[ $btype =~ ^musl_linux.*$ ]]; then
        build_tag="// +build musl"
    fi

    local dynlibs=$(parse_pc_dynlibs $pc)

    echo "Copying $apath to $dpath"
    cp "$apath" "$dpath"

    echo "Generating $gpath (extra build tag: $build_tag)"

    cat >$gpath <<EOF
// +build !dynamic
$build_tag

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: \${SRCDIR}/librdkafka_vendor/${dpath} $dynlibs
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static ${btype} from ${srcinfo}"
EOF

    git add "$dpath" "$gpath"

}


bundle="$1"
[[ -f $bundle ]] || usage

bundlename=$(basename "$bundle")

bdir=$(mktemp -d tmpXXXXXX)

echo "Extracting bundle $bundle:"
tar -xzvf "$bundle" -C "$bdir/"

echo "Copying librdkafka files"
for f in rdkafka.h LICENSES.txt ; do
    cp $bdir/$f . || true
    git add "$f"
done


for btype in glibc_linux_amd64 \
                 glibc_linux_arm64 \
                 musl_linux_amd64 \
                 musl_linux_arm64 \
                 darwin_amd64 \
                 darwin_arm64 \
                 windows ; do
    lib=$bdir/librdkafka_${btype}.a
    pc=${lib/%.a/.pc}
    [[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1)
    [[ -f $pc ]] || (echo "Expected file $pc missing" ; exit 1)

    setup_build $btype $lib $pc $bundlename
done

rm -rf "$bdir"

echo "All done"
113 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/import.sh generated vendored
@@ -1,113 +0,0 @@
#!/bin/bash
#
#
# Import a new version of librdkafka based on a librdkafka static bundle.
# This will create a separate branch, import librdkafka, make a commit,
# and then ask you to push the branch to github, have it reviewed,
# and then later merged (NOT squashed or rebased).
# Having a merge per import allows future shallow clones to skip and ignore
# older imports, hopefully reducing the amount of git history data 'go get'
# needs to download.

set -e

usage() {
    echo "Usage: $0 [--devel] path/to/librdkafka-static-bundle-<VERSION>.tgz"
    echo ""
    echo "This tool must be run from the TOPDIR/kafka/librdkafka directory"
    echo ""
    echo "Options:"
    echo "  --devel - Development use: No branch checks and does not push to github"
    exit 1
}

error_cleanup() {
    echo "Error occurred, cleaning up"
    git checkout $curr_branch
    git branch -D $import_branch
    exit 1
}

devel=0
if [[ $1 == --devel ]]; then
    devel=1
    shift
fi

bundle="$1"
[[ -f $bundle ]] || usage

# Parse the librdkafka version from the bundle
bundlename=$(basename $bundle)
version=${bundlename#librdkafka-static-bundle-}
version=${version%.tgz}

if [[ -z $version ]]; then
    echo "Error: Could not parse version from bundle $bundle"
    exit 1
fi

# Verify branch state
curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-)
uncommitted=$(git status --untracked-files=no --porcelain)

if [[ ! -z $uncommitted ]]; then
    echo "Error: This script must be run on a clean branch with no uncommitted changes"
    echo "Uncommitted files:"
    echo "$uncommitted"
    exit 1
fi

if [[ $devel != 1 ]] && [[ $curr_branch != master ]] ; then
    echo "Error: This script must be run on an up-to-date, clean, master branch"
    exit 1
fi


# Create import branch, import bundle, commit.
import_branch="import_$version"

exists=$(git branch -rlq | grep "/$import_branch\$" || true)
if [[ ! -z $exists ]]; then
    echo "Error: This version branch already seems to exist: $exists: already imported?"
    [[ $devel != 1 ]] && exit 1
fi

echo "Checking for existing commits that match this version (should be none)"
git log --oneline | grep "^librdkafka static bundle $version\$" && exit 1


echo "Creating import branch $import_branch"
git checkout -b $import_branch

echo "Importing bundle $bundle"
./bundle-import.sh "$bundle" || error_cleanup

echo "Committing $version"
git commit -a -m "librdkafka static bundle $version" || error_cleanup

echo "Updating error codes and docs"
pushd ../../
make -f mk/Makefile docs || error_cleanup
git commit -a -m "Documentation and error code update for librdkafka $version" \
    || error_cleanup
popd

if [[ $devel != 1 ]]; then
    echo "Pushing branch"
    git push origin $import_branch || error_cleanup
fi

git checkout $curr_branch

if [[ $devel != 1 ]]; then
    git branch -D $import_branch
fi

echo ""
echo "############## IMPORT OF $version COMPLETE ##############"
if [[ $devel != 1 ]]; then
    echo "Branch $import_branch has been pushed."
    echo "Create a PR, have it reviewed and then merge it (do NOT squash or rebase)."
fi
@@ -1,21 +0,0 @@
/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package librdkafka

// LibrdkafkaGoSubdir is a dummy variable needed to export something so the
// file is not empty.
var LibrdkafkaGoSubdir = true
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
10337 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h generated vendored
File diff suppressed because it is too large
331 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka_mock.h generated vendored
@@ -1,331 +0,0 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2019 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RDKAFKA_MOCK_H_
#define _RDKAFKA_MOCK_H_

#ifndef _RDKAFKA_H_
#error "rdkafka_mock.h must be included after rdkafka.h"
#endif

#ifdef __cplusplus
extern "C" {
#if 0
} /* Restore indent */
#endif
#endif


/**
 * @name Mock cluster
 *
 * Provides a mock Kafka cluster with a configurable number of brokers
 * that support a reasonable subset of Kafka protocol operations,
 * error injection, etc.
 *
 * There are two ways to use the mock clusters: the simplest approach
 * is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
 * in an existing application, which will replace the configured
 * `bootstrap.servers` with the mock cluster brokers.
 * This approach is convenient to easily test existing applications.
 *
 * The second approach is to explicitly create a mock cluster on an
 * rd_kafka_t instance by using rd_kafka_mock_cluster_new().
 *
 * Mock clusters provide localhost listeners that can be used as the bootstrap
 * servers by multiple rd_kafka_t instances.
 *
 * Currently supported functionality:
 *  - Producer
 *  - Idempotent Producer
 *  - Transactional Producer
 *  - Low-level consumer
 *  - High-level balanced consumer groups with offset commits
 *  - Topic Metadata and auto creation
 *
 * @remark High-level consumers making use of the balanced consumer groups
 *         are not supported.
 *
 * @remark This is an experimental public API that is NOT covered by the
 *         librdkafka API or ABI stability guarantees.
 *
 *
 * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
 *
 * @{
 */

typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;


/**
 * @brief Create new mock cluster with \p broker_cnt brokers.
 *
 * The broker ids will start at 1 up to and including \p broker_cnt.
 *
 * The \p rk instance is required for internal bookkeeping but continues
 * to operate as usual.
 */
RD_EXPORT
rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk,
                                                    int broker_cnt);


/**
 * @brief Destroy mock cluster.
 */
RD_EXPORT
void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster);



/**
 * @returns the rd_kafka_t instance for a cluster as passed to
 *          rd_kafka_mock_cluster_new().
 */
RD_EXPORT rd_kafka_t *
rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster);


/**
 * @returns the rd_kafka_mock_cluster_t instance as created by
 *          setting the `test.mock.num.brokers` configuration property,
 *          or NULL if no such instance.
 */
RD_EXPORT rd_kafka_mock_cluster_t *
rd_kafka_handle_mock_cluster (const rd_kafka_t *rk);



/**
 * @returns the mock cluster's bootstrap.servers list
 */
RD_EXPORT const char *
rd_kafka_mock_cluster_bootstraps (const rd_kafka_mock_cluster_t *mcluster);


/**
 * @brief Clear the cluster's error state for the given \p ApiKey.
 */
RD_EXPORT
void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster,
                                         int16_t ApiKey);


/**
 * @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
 *        error stack for the given \p ApiKey.
 *
 * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
 *
 * The following \p cnt protocol requests matching \p ApiKey will fail with the
 * provided error code and be removed from the stack, starting with
 * the first error code, then the second, etc.
 *
 * Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
 * disconnect the client, which can be useful to trigger a disconnect on
 * certain requests.
 */
RD_EXPORT
void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster,
                                        int16_t ApiKey, size_t cnt, ...);


/**
 * @brief Same as rd_kafka_mock_push_request_errors() but takes
 *        an array of errors.
 */
RD_EXPORT void
rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster,
                                         int16_t ApiKey,
                                         size_t cnt,
                                         const rd_kafka_resp_err_t *errors);


/**
 * @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
 *        the broker's error stack for the given \p ApiKey.
 *
 * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
 *
 * Each entry is a tuple of:
 *   rd_kafka_resp_err_t err - error to return (or 0)
 *   int rtt_ms - response RTT/delay in milliseconds (or 0)
 *
 * The following \p cnt protocol requests matching \p ApiKey will fail with the
 * provided error code and be removed from the stack, starting with
 * the first error code, then the second, etc.
 *
 * @remark The broker errors take precedence over the cluster errors.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster,
                                              int32_t broker_id,
                                              int16_t ApiKey, size_t cnt, ...);


/**
 * @brief Set the topic error to return in protocol requests.
 *
 * Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
 */
RD_EXPORT
void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster,
                                    const char *topic,
                                    rd_kafka_resp_err_t err);


/**
 * @brief Creates a topic.
 *
 * This is an alternative to automatic topic creation as performed by
 * the client itself.
 *
 * @remark The Topic Admin API (CreateTopics) is not supported by the
 *         mock broker.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster,
                            const char *topic, int partition_cnt,
                            int replication_factor);


/**
 * @brief Sets the partition leader.
 *
 * The topic will be created if it does not exist.
 *
 * \p broker_id needs to be an existing broker, or -1 to make the
 * partition leader-less.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster,
                                    const char *topic, int32_t partition,
                                    int32_t broker_id);

/**
 * @brief Sets the partition's preferred replica / follower.
 *
 * The topic will be created if it does not exist.
 *
 * \p broker_id does not need to point to an existing broker.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster,
                                      const char *topic, int32_t partition,
                                      int32_t broker_id);

/**
 * @brief Sets the partition's preferred replica / follower low and high
 *        watermarks.
 *
 * The topic will be created if it does not exist.
 *
 * Setting an offset to -1 will revert back to the leader's corresponding
 * watermark.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster,
                                             const char *topic,
                                             int32_t partition,
                                             int64_t lo, int64_t hi);


/**
 * @brief Disconnects the broker and disallows any new connections.
 *        This does NOT trigger leader change.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_down (rd_kafka_mock_cluster_t *mcluster,
                               int32_t broker_id);

/**
 * @brief Makes the broker accept connections again.
 *        This does NOT trigger leader change.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_up (rd_kafka_mock_cluster_t *mcluster,
                             int32_t broker_id);


/**
 * @brief Set broker round-trip-time delay in milliseconds.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rtt (rd_kafka_mock_cluster_t *mcluster,
                              int32_t broker_id, int rtt_ms);

/**
 * @brief Sets the broker's rack as reported in Metadata to the client.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster,
                               int32_t broker_id, const char *rack);



/**
 * @brief Explicitly sets the coordinator. If this API is not used, a
 *        standard hashing scheme will be used.
 *
 * @param key_type "transaction" or "group"
 * @param key The transactional.id or group.id
 * @param broker_id The new coordinator, does not have to be a valid broker.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster,
                               const char *key_type, const char *key,
                               int32_t broker_id);



/**
 * @brief Set the allowed ApiVersion range for \p ApiKey.
 *
 * Set \p MinVersion and \p MaxVersion to -1 to disable the API
 * completely.
 *
 * \p MaxVersion MUST not exceed the maximum implemented value,
 * see rdkafka_mock_handlers.c.
 *
 * @param ApiKey Protocol request type/key
 * @param MinVersion Minimum version supported (or -1 to disable).
 * @param MaxVersion Maximum version supported (or -1 to disable).
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_set_apiversion (rd_kafka_mock_cluster_t *mcluster,
                              int16_t ApiKey,
                              int16_t MinVersion, int16_t MaxVersion);


/**@}*/

#ifdef __cplusplus
}
#endif
#endif /* _RDKAFKA_MOCK_H_ */
89 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/log.go generated vendored
@@ -1,89 +0,0 @@
package kafka

import (
	"fmt"
	"time"
)

/*
#include "select_rdkafka.h"
*/
import "C"

// LogEvent represents a log entry from the librdkafka internal log queue
type LogEvent struct {
	Name      string    // Name of client instance
	Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
	Message   string    // Log message
	Level     int       // Log syslog level, lower is more critical.
	Timestamp time.Time // Log timestamp
}

// newLogEvent creates a new LogEvent from the given rd_kafka_event_t.
//
// This function does not take ownership of the cEvent pointer. You need to
// free its resources using C.rd_kafka_event_destroy afterwards.
//
// The cEvent object needs to be of type C.RD_KAFKA_EVENT_LOG. Calling this
// function with an object of another type has undefined behaviour.
func (h *handle) newLogEvent(cEvent *C.rd_kafka_event_t) LogEvent {
	var tag, message *C.char
	var level C.int

	C.rd_kafka_event_log(cEvent, &(tag), &(message), &(level))

	return LogEvent{
		Name:      h.name,
		Tag:       C.GoString(tag),
		Message:   C.GoString(message),
		Level:     int(level),
		Timestamp: time.Now(),
	}
}

// pollLogEvents polls log events from librdkafka and pushes them to toChannel,
// until doneChan is closed.
//
// Each call to librdkafka times out after timeoutMs. If a call to librdkafka
// is ongoing when doneChan is closed, the function will wait until the call
// returns or times out, whichever happens first.
func (h *handle) pollLogEvents(toChannel chan LogEvent, timeoutMs int, doneChan chan bool) {
	for {
		select {
		case <-doneChan:
			return

		default:
			cEvent := C.rd_kafka_queue_poll(h.logq, C.int(timeoutMs))
			if cEvent == nil {
				continue
			}

			if C.rd_kafka_event_type(cEvent) != C.RD_KAFKA_EVENT_LOG {
				C.rd_kafka_event_destroy(cEvent)
				continue
			}

			logEvent := h.newLogEvent(cEvent)
			C.rd_kafka_event_destroy(cEvent)

			select {
			case <-doneChan:
				return

			case toChannel <- logEvent:
				continue
			}
		}
	}
}

func (logEvent LogEvent) String() string {
	return fmt.Sprintf(
		"[%v][%s][%s][%d]%s",
		logEvent.Timestamp.Format(time.RFC3339),
		logEvent.Name,
		logEvent.Tag,
		logEvent.Level,
		logEvent.Message)
}
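In application code these log events are normally consumed from a client's log channel rather than by calling pollLogEvents directly. A minimal sketch, assuming the `go.logs.channel.enable` configuration property and the `Logs()` accessor that recent confluent-kafka-go releases expose (verify both against the version actually vendored here):

package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Route librdkafka logs to a Go channel instead of stderr
	// (assumed config knob; see the vendored version's docs).
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers":      "localhost:9092",
		"go.logs.channel.enable": true,
	})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	// Drain LogEvents; String() renders "[ts][name][tag][level]message".
	go func() {
		for logEvent := range p.Logs() {
			fmt.Println(logEvent.String())
		}
	}()
}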
229 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/message.go generated vendored
@@ -1,229 +0,0 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"time"
	"unsafe"
)

/*
#include <string.h>
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"

void setup_rkmessage (rd_kafka_message_t *rkmessage,
                      rd_kafka_topic_t *rkt, int32_t partition,
                      const void *payload, size_t len,
                      void *key, size_t keyLen, void *opaque) {
     rkmessage->rkt       = rkt;
     rkmessage->partition = partition;
     rkmessage->payload   = (void *)payload;
     rkmessage->len       = len;
     rkmessage->key       = (void *)key;
     rkmessage->key_len   = keyLen;
     rkmessage->_private  = opaque;
}
*/
import "C"

// TimestampType is the Message timestamp type or source
type TimestampType int

const (
	// TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
	TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
	// TimestampCreateTime indicates timestamp set by producer (source time)
	TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
	// TimestampLogAppendTime indicates timestamp set by broker (store time)
	TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
)

func (t TimestampType) String() string {
	switch t {
	case TimestampCreateTime:
		return "CreateTime"
	case TimestampLogAppendTime:
		return "LogAppendTime"
	case TimestampNotAvailable:
		fallthrough
	default:
		return "NotAvailable"
	}
}

// Message represents a Kafka message
type Message struct {
	TopicPartition TopicPartition
	Value          []byte
	Key            []byte
	Timestamp      time.Time
	TimestampType  TimestampType
	Opaque         interface{}
	Headers        []Header
	LeaderEpoch    *int32 // Deprecated: LeaderEpoch or nil if not available. Use m.TopicPartition.LeaderEpoch instead.
}

// String returns a human readable representation of a Message.
// Key and payload are not represented.
func (m *Message) String() string {
	var topic string
	if m.TopicPartition.Topic != nil {
		topic = *m.TopicPartition.Topic
	} else {
		topic = ""
	}
	return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset)
}

func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) {
	if msg.TopicPartition.Topic == nil {
		return nil
	}

	return h.getRkt(*msg.TopicPartition.Topic)
}

// setupHeadersFromGlueMsg converts the C tmp headers in gMsg to
// Go Headers in msg.
// gMsg.tmphdrs will be freed.
func setupHeadersFromGlueMsg(msg *Message, gMsg *C.glue_msg_t) {
	msg.Headers = make([]Header, gMsg.tmphdrsCnt)
	for n := range msg.Headers {
		tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(gMsg.tmphdrs))[n]
		msg.Headers[n].Key = C.GoString(tmphdr.key)
		if tmphdr.val != nil {
			msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size))
		} else {
			msg.Headers[n].Value = nil
		}
	}
	C.free(unsafe.Pointer(gMsg.tmphdrs))
}

func (h *handle) newMessageFromGlueMsg(gMsg *C.glue_msg_t) (msg *Message) {
	msg = &Message{}

	if gMsg.ts != -1 {
		ts := int64(gMsg.ts)
		msg.TimestampType = TimestampType(gMsg.tstype)
		msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000)
	}

	if gMsg.tmphdrsCnt > 0 {
		setupHeadersFromGlueMsg(msg, gMsg)
	}

	h.setupMessageFromC(msg, gMsg.msg)

	return msg
}

// setupMessageFromC sets up a message object from a C rd_kafka_message_t
func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) {
	if cmsg.rkt != nil {
		topic := h.getTopicNameFromRkt(cmsg.rkt)
		msg.TopicPartition.Topic = &topic
	}
	msg.TopicPartition.Partition = int32(cmsg.partition)
	if cmsg.payload != nil && h.msgFields.Value {
		msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len))
	}
	if cmsg.key != nil && h.msgFields.Key {
		msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len))
	}
	if h.msgFields.Headers {
		var gMsg C.glue_msg_t
		gMsg.msg = cmsg
		gMsg.want_hdrs = C.int8_t(1)
		chdrsToTmphdrs(&gMsg)
		if gMsg.tmphdrsCnt > 0 {
			setupHeadersFromGlueMsg(msg, &gMsg)
		}
	}
	msg.TopicPartition.Offset = Offset(cmsg.offset)
	if cmsg.err != 0 {
		msg.TopicPartition.Error = newError(cmsg.err)
	}

	leaderEpoch := int32(C.rd_kafka_message_leader_epoch(cmsg))
	if leaderEpoch >= 0 {
		msg.LeaderEpoch = &leaderEpoch
		msg.TopicPartition.LeaderEpoch = &leaderEpoch
	}
}

// newMessageFromC creates a new message object from a C rd_kafka_message_t
// NOTE: For use with Producer: does not set message timestamp fields.
func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) {
	msg = &Message{}

	h.setupMessageFromC(msg, cmsg)

	return msg
}

// messageToC sets up cmsg as a clone of msg
func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) {
	var valp unsafe.Pointer
	var keyp unsafe.Pointer

	// to circumvent Cgo constraints we need to allocate C heap memory
	// for both Value and Key (one allocation back to back)
	// and copy the bytes from Value and Key to the C memory.
	// We later tell librdkafka (in produce()) to free the
	// C memory pointer when it is done.
	var payload unsafe.Pointer

	valueLen := 0
	keyLen := 0
	if msg.Value != nil {
		valueLen = len(msg.Value)
	}
	if msg.Key != nil {
		keyLen = len(msg.Key)
	}

	allocLen := valueLen + keyLen
	if allocLen > 0 {
		payload = C.malloc(C.size_t(allocLen))
		if valueLen > 0 {
			copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value)
			valp = payload
		}
		if keyLen > 0 {
			copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key)
			keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen]))
		}
	}

	cmsg.rkt = h.getRktFromMessage(msg)
	cmsg.partition = C.int32_t(msg.TopicPartition.Partition)
	cmsg.payload = valp
	cmsg.len = C.size_t(valueLen)
	cmsg.key = keyp
	cmsg.key_len = C.size_t(keyLen)
	cmsg._private = nil
}

// used for testing messageToC performance
func (h *handle) messageToCDummy(msg *Message) {
	var cmsg C.rd_kafka_message_t
	h.messageToC(msg, &cmsg)
}
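For orientation, this Message struct is what applications hand to the producer. A hedged sketch using this package's public API (the broker address and topic name are illustrative; verify method signatures against the vendored version):

package main

import (
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	topic := "my-topic"
	// Fields map directly onto the Message struct above; Key and
	// Headers are optional, PartitionAny lets librdkafka pick.
	msg := &kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Key:            []byte("key"),
		Value:          []byte("value"),
		Headers:        []kafka.Header{{Key: "origin", Value: []byte("example")}},
	}
	if err := p.Produce(msg, nil); err != nil {
		panic(err)
	}
	// Wait up to 15s for delivery reports before exiting.
	p.Flush(15 * 1000)
}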
180 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/metadata.go generated vendored
@@ -1,180 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"

struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) {
  return &m->brokers[i];
}

struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) {
  return &m->topics[i];
}

struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) {
  return &m->topics[topic_idx].partitions[partition_idx];
}

int32_t _get_int32_element (int32_t *arr, int i) {
  return arr[i];
}

*/
import "C"

// BrokerMetadata contains per-broker metadata
type BrokerMetadata struct {
	ID   int32
	Host string
	Port int
}

// PartitionMetadata contains per-partition metadata
type PartitionMetadata struct {
	ID       int32
	Error    Error
	Leader   int32
	Replicas []int32
	Isrs     []int32
}

// TopicMetadata contains per-topic metadata
type TopicMetadata struct {
	Topic      string
	Partitions []PartitionMetadata
	Error      Error
}

// Metadata contains broker and topic metadata for all (matching) topics
type Metadata struct {
	Brokers []BrokerMetadata
	Topics  map[string]TopicMetadata

	OriginatingBroker BrokerMetadata
}

// getMetadata queries broker for cluster and topic metadata.
// If topic is non-nil only information about that topic is returned, else if
// allTopics is false only information about locally used topics is returned,
// else information about all topics is returned.
func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
	h := H.gethandle()

	var rkt *C.rd_kafka_topic_t
	if topic != nil {
		rkt = h.getRkt(*topic)
	}

	var cMd *C.struct_rd_kafka_metadata
	cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics),
		rkt, &cMd, C.int(timeoutMs))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cErr)
	}

	m := Metadata{}
	defer C.rd_kafka_metadata_destroy(cMd)

	m.Brokers = make([]BrokerMetadata, cMd.broker_cnt)
	for i := 0; i < int(cMd.broker_cnt); i++ {
		b := C._getMetadata_broker_element(cMd, C.int(i))
		m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host),
			int(b.port)}
	}

	m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt))
	for i := 0; i < int(cMd.topic_cnt); i++ {
		t := C._getMetadata_topic_element(cMd, C.int(i))

		thisTopic := C.GoString(t.topic)
		m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic,
			Error:      newError(t.err),
			Partitions: make([]PartitionMetadata, int(t.partition_cnt))}

		for j := 0; j < int(t.partition_cnt); j++ {
			p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j))
			m.Topics[thisTopic].Partitions[j] = PartitionMetadata{
				ID:     int32(p.id),
				Error:  newError(p.err),
				Leader: int32(p.leader)}
			m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt))
			for ir := 0; ir < int(p.replica_cnt); ir++ {
				m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir)))
			}

			m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt))
			for ii := 0; ii < int(p.isr_cnt); ii++ {
				m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii)))
			}
		}
	}

	m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id),
		C.GoString(cMd.orig_broker_name), 0}

	return &m, nil
}

// queryWatermarkOffsets returns the broker's low and high offsets for the given topic
// and partition.
func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) {
	h := H.gethandle()

	ctopic := C.CString(topic)
	defer C.free(unsafe.Pointer(ctopic))

	var cLow, cHigh C.int64_t

	e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
		&cLow, &cHigh, C.int(timeoutMs))
	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return 0, 0, newError(e)
	}

	low = int64(cLow)
	high = int64(cHigh)
	return low, high, nil
}

// getWatermarkOffsets returns the client's cached low and high offsets for the given topic
// and partition.
func getWatermarkOffsets(H Handle, topic string, partition int32) (low, high int64, err error) {
	h := H.gethandle()

	ctopic := C.CString(topic)
	defer C.free(unsafe.Pointer(ctopic))

	var cLow, cHigh C.int64_t

	e := C.rd_kafka_get_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
		&cLow, &cHigh)
	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return 0, 0, newError(e)
	}

	low = int64(cLow)
	high = int64(cHigh)

	return low, high, nil
}
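These helpers are unexported; applications reach them through exported wrappers on the client types. A hedged sketch assuming the QueryWatermarkOffsets method that confluent-kafka-go exposes on Consumer (and Producer); confirm the exact method set against the vendored version:

package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092",
		"group.id":          "example-group",
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Broker-side low/high watermarks for partition 0, 5s timeout;
	// this ends up in queryWatermarkOffsets above.
	low, high, err := c.QueryWatermarkOffsets("my-topic", 0, 5000)
	if err != nil {
		panic(err)
	}
	fmt.Printf("offset window: %d..%d (%d messages)\n", low, high, high-low)
}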
35 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/misc.go generated vendored
@@ -1,35 +0,0 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import "C"

// bool2cint converts a bool to a C.int (1 or 0)
func bool2cint(b bool) C.int {
	if b {
		return 1
	}
	return 0
}

// cint2bool converts a C.int to a bool
func cint2bool(v C.int) bool {
	if v == 0 {
		return false
	}
	return true
}
134 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go generated vendored
@@ -1,134 +0,0 @@
/**
 * Copyright 2023 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"time"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"

// MockCluster represents a Kafka mock cluster instance which can be used
// for testing.
type MockCluster struct {
	rk       *C.rd_kafka_t
	mcluster *C.rd_kafka_mock_cluster_t
}

// NewMockCluster provides a mock Kafka cluster with a configurable
// number of brokers that support a reasonable subset of Kafka protocol
// operations, error injection, etc.
//
// The broker ids will start at 1 up to and including brokerCount.
//
// Mock clusters provide localhost listeners that can be used as the bootstrap
// servers by multiple Kafka client instances.
//
// Currently supported functionality:
// - Producer
// - Idempotent Producer
// - Transactional Producer
// - Low-level consumer
// - High-level balanced consumer groups with offset commits
// - Topic Metadata and auto creation
//
// Warning: THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
func NewMockCluster(brokerCount int) (*MockCluster, error) {

	mc := &MockCluster{}

	cErrstr := (*C.char)(C.malloc(C.size_t(512)))
	defer C.free(unsafe.Pointer(cErrstr))

	cConf := C.rd_kafka_conf_new()

	mc.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
	if mc.rk == nil {
		C.rd_kafka_conf_destroy(cConf)
		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	mc.mcluster = C.rd_kafka_mock_cluster_new(mc.rk, C.int(brokerCount))
	if mc.mcluster == nil {
		C.rd_kafka_destroy(mc.rk)
		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	return mc, nil
}

// BootstrapServers returns the bootstrap.servers property for this MockCluster
func (mc *MockCluster) BootstrapServers() string {
	return C.GoString(C.rd_kafka_mock_cluster_bootstraps(mc.mcluster))
}

// SetRoundtripDuration sets the broker round-trip-time delay for the given broker.
// Use brokerID -1 for all brokers, or >= 0 for a specific broker.
func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error {
	durationInMillis := C.int(duration.Milliseconds())
	cError := C.rd_kafka_mock_broker_set_rtt(mc.mcluster, C.int(brokerID), durationInMillis)
	if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cError)
	}
	return nil
}

// SetBrokerDown disconnects the broker and disallows any new connections.
// This does NOT trigger leader change.
// Use brokerID -1 for all brokers, or >= 0 for a specific broker.
func (mc *MockCluster) SetBrokerDown(brokerID int) error {
	cError := C.rd_kafka_mock_broker_set_down(mc.mcluster, C.int(brokerID))
	if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cError)
	}
	return nil
}

// SetBrokerUp makes the broker accept connections again.
// This does NOT trigger leader change.
// Use brokerID -1 for all brokers, or >= 0 for a specific broker.
func (mc *MockCluster) SetBrokerUp(brokerID int) error {
	cError := C.rd_kafka_mock_broker_set_up(mc.mcluster, C.int(brokerID))
	if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cError)
	}
	return nil
}

// CreateTopic creates a topic without having to use a producer
func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error {
	topicStr := C.CString(topic)
	defer C.free(unsafe.Pointer(topicStr))

	cError := C.rd_kafka_mock_topic_create(mc.mcluster, topicStr, C.int(partitions), C.int(replicationFactor))
	if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cError)
	}
	return nil
}

// Close and destroy the MockCluster
func (mc *MockCluster) Close() {
	C.rd_kafka_mock_cluster_destroy(mc.mcluster)
	C.rd_kafka_destroy(mc.rk)
}
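Taken together with the rdkafka_mock.h API above, this wrapper can be exercised as follows. A minimal sketch based only on the functions shown in this diff (the topic name and broker count are illustrative):

package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Spin up an in-process mock cluster with 3 brokers (ids 1..3).
	mc, err := kafka.NewMockCluster(3)
	if err != nil {
		panic(err)
	}
	defer mc.Close()

	// Pre-create a topic instead of relying on auto creation.
	if err := mc.CreateTopic("test-topic", 4, 3); err != nil {
		panic(err)
	}

	// Point any client at the mock listeners.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": mc.BootstrapServers(),
	})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	fmt.Println("mock cluster listening at", mc.BootstrapServers())
}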
147 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go generated vendored
@@ -1,147 +0,0 @@
/**
 * Copyright 2017 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
    "fmt"
    "strconv"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"

static int64_t _c_rdkafka_offset_tail(int64_t rel) {
    return RD_KAFKA_OFFSET_TAIL(rel);
}
*/
import "C"

// Offset type (int64) with support for canonical names
type Offset int64

// OffsetBeginning represents the earliest offset (logical)
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)

// OffsetEnd represents the latest offset (logical)
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)

// OffsetInvalid represents an invalid/unspecified offset
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)

// OffsetStored represents a stored offset
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)

func (o Offset) String() string {
    switch o {
    case OffsetBeginning:
        return "beginning"
    case OffsetEnd:
        return "end"
    case OffsetInvalid:
        return "unset"
    case OffsetStored:
        return "stored"
    default:
        return fmt.Sprintf("%d", int64(o))
    }
}

// Set offset value, see NewOffset()
func (o *Offset) Set(offset interface{}) error {
    n, err := NewOffset(offset)

    if err == nil {
        *o = n
    }

    return err
}

// NewOffset creates a new Offset using the provided logical string, an
// absolute int64 offset value, or a concrete Offset type.
// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
func NewOffset(offset interface{}) (Offset, error) {

    switch v := offset.(type) {
    case string:
        switch v {
        case "beginning":
            fallthrough
        case "earliest":
            return Offset(OffsetBeginning), nil

        case "end":
            fallthrough
        case "latest":
            return Offset(OffsetEnd), nil

        case "unset":
            fallthrough
        case "invalid":
            return Offset(OffsetInvalid), nil

        case "stored":
            return Offset(OffsetStored), nil

        default:
            off, err := strconv.Atoi(v)
            return Offset(off), err
        }

    case int:
        return Offset((int64)(v)), nil
    case int64:
        return Offset(v), nil
    case Offset:
        return Offset(v), nil
    default:
        return OffsetInvalid, newErrorFromString(ErrInvalidArg,
            fmt.Sprintf("Invalid offset type: %t", v))
    }
}
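A quick illustration of the accepted inputs, using the constants defined above:

off, _ := kafka.NewOffset("earliest") // == kafka.OffsetBeginning
off, _ = kafka.NewOffset("latest")    // == kafka.OffsetEnd
off, _ = kafka.NewOffset(int64(42))   // absolute offset 42
off, _ = kafka.NewOffset("42")        // numeric strings parse via strconv.Atoi
fmt.Println(off.String())             // prints "42"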
// OffsetTail returns the logical offset relativeOffset from current end of partition
func OffsetTail(relativeOffset Offset) Offset {
    return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset)))
}

// offsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked up offsets are represented as `.Offset` in the returned
// `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
    cparts := newCPartsFromTopicPartitions(times)
    defer C.rd_kafka_topic_partition_list_destroy(cparts)
    cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs))
    if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
        return nil, newError(cerr)
    }

    return newTopicPartitionsFromCparts(cparts), nil
}
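offsetsForTimes is unexported; callers reach it through exported wrappers such as Consumer.OffsetsForTimes. A hedged usage sketch (topic and consumer are stand-ins for a configured topic name and Consumer):

// Resolve the first offsets at or after a wall-clock timestamp, then
// start consuming from there.
ts := time.Now().Add(-1 * time.Hour).UnixMilli()
times := []kafka.TopicPartition{{Topic: &topic, Partition: 0, Offset: kafka.Offset(ts)}}
offsets, err := consumer.OffsetsForTimes(times, 5000) // block up to 5s
if err == nil && offsets[0].Offset >= 0 {
    err = consumer.Assign(offsets) // begin reading at the resolved offset
}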
1039 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go (generated, vendored)
File diff suppressed because it is too large
31 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/select_rdkafka.h (generated, vendored)
@@ -1,31 +0,0 @@
/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file uses a preprocessor macro defined by the various build_*.go
// files to determine whether to import the bundled librdkafka header, or
// the system one.
// This is needed because cgo will automatically add -I. to the include
// path, so <librdkafka/rdkafka.h> would find a bundled header instead of
// the system one if it were called librdkafka/rdkafka.h instead of
// librdkafka_vendor/rdkafka.h

#ifdef USE_VENDORED_LIBRDKAFKA
#include "librdkafka_vendor/rdkafka.h"
#include "librdkafka_vendor/rdkafka_mock.h"
#else
#include <librdkafka/rdkafka.h>
#include <librdkafka/rdkafka_mock.h>
#endif
12 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json (generated, vendored)
@@ -1,12 +0,0 @@
{
    "Brokers": "mybroker or $BROKERS env",
    "BrokersSasl": "mybroker or $BROKERSSASL env",
    "SaslUsername": "testuser",
    "SaslPassword": "testpass",
    "SaslMechanism": "PLAIN",
    "TopicName": "test",
    "GroupID": "testgroup",
    "PerfMsgCount": 1000000,
    "PerfMsgSize": 100,
    "Config": ["api.version.request=true"]
}
55 vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/time.go (generated, vendored)
@@ -1,55 +0,0 @@
/**
 * Copyright 2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import "C"

import (
    "context"
    "time"
)

const (
    cTimeoutInfinite = C.int(-1) // Blocks indefinitely until completion.
    cTimeoutNoWait   = C.int(0)  // Returns immediately without blocking.
)

// cTimeoutFromContext returns the remaining time after which work done on behalf of this context
// should be canceled, in milliseconds.
//
// If no deadline/timeout is set, or if the timeout does not fit in an int32, it returns
// cTimeoutInfinite;
// If there is no time left in this context, it returns cTimeoutNoWait.
func cTimeoutFromContext(ctx context.Context) C.int {
    if ctx == nil {
        return cTimeoutInfinite
    }
    timeout, hasTimeout := timeout(ctx)
    if !hasTimeout {
        return cTimeoutInfinite
    }
    if timeout <= 0 {
        return cTimeoutNoWait
    }

    timeoutMs := int64(timeout / time.Millisecond)
    if int64(int32(timeoutMs)) < timeoutMs {
        return cTimeoutInfinite
    }

    return C.int(timeoutMs)
}
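To make the conversion concrete: a context with roughly 2.5 s remaining becomes C.int(2500), a context whose deadline has already passed becomes cTimeoutNoWait, and a context with no deadline becomes cTimeoutInfinite. A caller-side sketch:

ctx, cancel := context.WithTimeout(context.Background(), 2500*time.Millisecond)
defer cancel()
// Inside the library this becomes librdkafka's timeout_ms argument:
//   cTimeoutFromContext(ctx)                  -> ~C.int(2500) (slightly less as time elapses)
//   cTimeoutFromContext(context.Background()) -> cTimeoutInfinite (-1)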
27 vendor/modules.txt (vendored)
@@ -22,10 +22,6 @@ github.com/Masterminds/semver/v3
# github.com/Masterminds/sprig/v3 v3.3.0
## explicit; go 1.21
github.com/Masterminds/sprig/v3
# github.com/Microsoft/go-winio v0.6.2
## explicit; go 1.21
# github.com/Microsoft/hcsshim v0.12.9
## explicit; go 1.22
# github.com/NYTimes/gziphandler v1.1.1
## explicit; go 1.11
github.com/NYTimes/gziphandler
@@ -155,9 +151,6 @@ github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991
## explicit; go 1.18
github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2
# github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20250922144431-372892d7c84d
## explicit; go 1.23.0
github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2
@@ -177,12 +170,6 @@ github.com/cloudevents/sdk-go/v2/event/datacodec/xml
github.com/cloudevents/sdk-go/v2/protocol
github.com/cloudevents/sdk-go/v2/protocol/http
github.com/cloudevents/sdk-go/v2/types
# github.com/confluentinc/confluent-kafka-go/v2 v2.3.0
## explicit; go 1.17
github.com/confluentinc/confluent-kafka-go/v2/kafka
github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor
# github.com/containerd/errdefs/pkg v0.3.0
## explicit; go 1.22
# github.com/coreos/go-semver v0.3.1
## explicit; go 1.8
github.com/coreos/go-semver/semver
@@ -196,8 +183,6 @@ github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
## explicit
github.com/davecgh/go-spew/spew
# github.com/docker/docker v28.4.0+incompatible
## explicit
# github.com/eclipse/paho.golang v0.23.0
## explicit; go 1.24.0
github.com/eclipse/paho.golang/packets
@@ -375,10 +360,6 @@ github.com/mitchellh/copystructure
# github.com/mitchellh/reflectwalk v1.0.2
## explicit
github.com/mitchellh/reflectwalk
# github.com/moby/docker-image-spec v1.3.1
## explicit; go 1.18
# github.com/moby/go-archive v0.1.0
## explicit; go 1.23.0
# github.com/mochi-mqtt/server/v2 v2.7.9
## explicit; go 1.21
github.com/mochi-mqtt/server/v2
@@ -1821,7 +1802,7 @@ open-cluster-management.io/api/operator/v1
open-cluster-management.io/api/utils/work/v1/workapplier
open-cluster-management.io/api/work/v1
open-cluster-management.io/api/work/v1alpha1
# open-cluster-management.io/sdk-go v1.1.0
# open-cluster-management.io/sdk-go v1.1.1-0.20251117075350-a9794783fa67
## explicit; go 1.24.0
open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1
open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1
@@ -1855,15 +1836,19 @@ open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/source/codec
open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/store
open-cluster-management.io/sdk-go/pkg/cloudevents/constants
open-cluster-management.io/sdk-go/pkg/cloudevents/generic
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/clients
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/v2/grpc
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types
open-cluster-management.io/sdk-go/pkg/cloudevents/generic/utils
open-cluster-management.io/sdk-go/pkg/cloudevents/server
open-cluster-management.io/sdk-go/pkg/cloudevents/server/grpc
open-cluster-management.io/sdk-go/pkg/cloudevents/server/grpc/authz/kube
31 vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/base_controller.go (generated, vendored)
@@ -30,14 +30,16 @@ func (c baseController) Name() string {
    return c.name
}

func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error {
    klog.Infof("Waiting for caches to sync for %s", controllerName)
func waitForNamedCacheSync(ctx context.Context, controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error {
    logger := klog.FromContext(ctx)

    logger.Info("Waiting for caches to sync")

    if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
        return fmt.Errorf("unable to sync caches for %s", controllerName)
    }

    klog.Infof("Caches are synced for %s ", controllerName)
    logger.Info("Caches are synced")

    return nil
}
@@ -47,10 +49,13 @@ func (c *baseController) SyncContext() SyncContext {
}

func (c *baseController) Run(ctx context.Context, workers int) {
    logger := klog.FromContext(ctx).WithName(c.name)
    ctx = klog.NewContext(ctx, logger)

    // give caches 10 minutes to sync
    cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout)
    defer cacheSyncCancel()
    err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...)
    err := waitForNamedCacheSync(ctx, c.name, cacheSyncCtx.Done(), c.cachesToSync...)
    if err != nil {
        select {
        case <-ctx.Done():
@@ -67,19 +72,19 @@ func (c *baseController) Run(ctx context.Context, workers int) {

    var workerWg sync.WaitGroup
    defer func() {
        defer klog.Infof("All %s workers have been terminated", c.name)
        defer logger.Info("All workers have been terminated")
        workerWg.Wait()
    }()

    // queueContext is used to track and initiate queue shutdown
    queueContext, queueContextCancel := context.WithCancel(context.TODO())
    queueContext, queueContextCancel := context.WithCancel(ctx)

    for i := 1; i <= workers; i++ {
        klog.Infof("Starting #%d worker of %s controller ...", i, c.name)
        logger.Info("Starting worker of controller ...", "numberOfWorkers", i)
        workerWg.Add(1)
        go func() {
            defer func() {
                klog.Infof("Shutting down worker of %s controller ...", c.name)
                logger.Info("Shutting down worker of controller ...")
                workerWg.Done()
            }()
            c.runWorker(queueContext)
@@ -104,7 +109,7 @@ func (c *baseController) Run(ctx context.Context, workers int) {
    // Wait for all workers to finish their job.
    // at this point the Run() can hang and callers have to implement the logic that will kill
    // this controller (SIGKILL).
    klog.Infof("Shutting down %s ...", c.name)
    logger.Info("Shutting down ...")
}

func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext, key string) error {
@@ -140,6 +145,8 @@ func (c *baseController) runWorker(queueCtx context.Context) {
}

func (c *baseController) processNextWorkItem(queueCtx context.Context) {
    logger := klog.FromContext(queueCtx)

    key, quit := c.syncContext.Queue().Get()
    if quit {
        return
@@ -150,10 +157,10 @@ func (c *baseController) processNextWorkItem(queueCtx context.Context) {
    queueKey := key

    if err := c.sync(queueCtx, syncCtx, queueKey); err != nil {
        if klog.V(4).Enabled() || key != "key" {
            utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
        if logger.V(4).Enabled() || key != "key" {
            utilruntime.HandleErrorWithContext(queueCtx, err, "controller failed to sync", "key", key, "error", err)
        } else {
            utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
            utilruntime.HandleErrorWithContext(queueCtx, err, "reconciliation failed", "error", err)
        }
        c.syncContext.Queue().AddRateLimited(key)
        return
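The pattern this change introduces, deriving a named logger once in Run and threading it through the context, puts logger wiring in the caller's hands. A minimal caller-side sketch (controller construction elided; the Controller type and "registration" value are illustrative):

// Sketch: wire a contextual logger into Run; every worker log line then
// carries the controller name (via WithName) plus these key/values.
func startController(c factory.Controller) {
    logger := klog.Background().WithValues("component", "registration")
    ctx := klog.NewContext(context.Background(), logger)
    go c.Run(ctx, 2)
}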
4 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/addon/client.go (generated, vendored)
@@ -23,7 +23,7 @@ import (
// ManagedClusterAddOnClient implements the ManagedClusterAddonInterface.
type ManagedClusterAddOnClient struct {
    cloudEventsClient *generic.CloudEventAgentClient[*addonapiv1alpha1.ManagedClusterAddOn]
    cloudEventsClient generic.CloudEventsClient[*addonapiv1alpha1.ManagedClusterAddOn]
    watcherStore store.ClientWatcherStore[*addonapiv1alpha1.ManagedClusterAddOn]
    namespace string
}
@@ -31,7 +31,7 @@ type ManagedClusterAddOnClient struct {
var _ addonv1alpha1client.ManagedClusterAddOnInterface = &ManagedClusterAddOnClient{}

func NewManagedClusterAddOnClient(
    cloudEventsClient *generic.CloudEventAgentClient[*addonapiv1alpha1.ManagedClusterAddOn],
    cloudEventsClient generic.CloudEventsClient[*addonapiv1alpha1.ManagedClusterAddOn],
    watcherStore store.ClientWatcherStore[*addonapiv1alpha1.ManagedClusterAddOn],
) *ManagedClusterAddOnClient {
    return &ManagedClusterAddOnClient{
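Switching the field and constructor from the concrete *generic.CloudEventAgentClient[T] to the generic.CloudEventsClient[T] interface (here and in the cluster, CSR, event, and lease clients below) lets tests substitute a double. A sketch, where fakeCloudEventsClient is a hypothetical stub, not part of the sdk:

// fakeCloudEventsClient is a hypothetical test double; embedding the
// interface satisfies it for the methods a given test never calls.
type fakeCloudEventsClient struct {
    generic.CloudEventsClient[*addonapiv1alpha1.ManagedClusterAddOn]
}

func newTestClient(ws store.ClientWatcherStore[*addonapiv1alpha1.ManagedClusterAddOn]) *addon.ManagedClusterAddOnClient {
    return addon.NewManagedClusterAddOnClient(&fakeCloudEventsClient{}, ws)
}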
4 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/cluster/client.go (generated, vendored)
@@ -24,14 +24,14 @@ import (
// ManagedClusterClient implements the ManagedClusterInterface. It sends the ManagedCluster status back to the source
// via the CloudEventAgentClient.
type ManagedClusterClient struct {
    cloudEventsClient *generic.CloudEventAgentClient[*clusterv1.ManagedCluster]
    cloudEventsClient generic.CloudEventsClient[*clusterv1.ManagedCluster]
    watcherStore store.ClientWatcherStore[*clusterv1.ManagedCluster]
}

var _ clusterv1client.ManagedClusterInterface = &ManagedClusterClient{}

func NewManagedClusterClient(
    cloudEventsClient *generic.CloudEventAgentClient[*clusterv1.ManagedCluster],
    cloudEventsClient generic.CloudEventsClient[*clusterv1.ManagedCluster],
    watcherStore store.ClientWatcherStore[*clusterv1.ManagedCluster],
    clusterName string,
) *ManagedClusterClient {
10 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/csr/client.go (generated, vendored)
@@ -23,14 +23,14 @@ import (
// CSRClient implements the CSRInterface. It sends the CSR to the source
// via the CloudEventAgentClient.
type CSRClient struct {
    cloudEventsClient *generic.CloudEventAgentClient[*certificatev1.CertificateSigningRequest]
    cloudEventsClient generic.CloudEventsClient[*certificatev1.CertificateSigningRequest]
    watcherStore store.ClientWatcherStore[*certificatev1.CertificateSigningRequest]
}

var _ cache.ListerWatcher = &CSRClient{}

func NewCSRClient(
    cloudEventsClient *generic.CloudEventAgentClient[*certificatev1.CertificateSigningRequest],
    cloudEventsClient generic.CloudEventsClient[*certificatev1.CertificateSigningRequest],
    watcherStore store.ClientWatcherStore[*certificatev1.CertificateSigningRequest],
    clusterName string,
) *CSRClient {
@@ -67,6 +67,12 @@ func (c *CSRClient) Create(ctx context.Context, csr *certificatev1.CertificateSi
        return nil, cloudeventserrors.ToStatusError(common.CSRGR, csr.Name, err)
    }

    // we need to add to the store here since the grpc driver may call this
    // when it cannot get the CSR from the lister.
    if err := c.watcherStore.Add(csr); err != nil {
        return nil, errors.NewInternalError(err)
    }

    return csr.DeepCopy(), nil
}
8 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/csr/clientholder.go (generated, vendored)
@@ -2,14 +2,12 @@ package csr
import (
    "context"
    "fmt"
    "time"

    certificatev1 "k8s.io/api/certificates/v1"
    "k8s.io/client-go/tools/cache"

    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/options"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store"
)

// ClientHolder holds a client that implements list/watch for CSR and a SharedIndexInformer for CSR
@@ -39,11 +37,5 @@ func NewAgentClientHolder(ctx context.Context, opt *options.GenericClientOptions
        csrClient, &certificatev1.CertificateSigningRequest{}, 30*time.Second,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

    agentStore, ok := opt.WatcherStore().(*store.AgentInformerWatcherStore[*certificatev1.CertificateSigningRequest])
    if !ok {
        return nil, fmt.Errorf("watcher store must be of type AgentInformerWatcherStore")
    }
    agentStore.SetInformer(csrInformer)

    return &ClientHolder{client: csrClient, informer: csrInformer}, nil
}
4 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/event/client.go (generated, vendored)
@@ -19,11 +19,11 @@ import (
)

type EventClient struct {
    cloudEventsClient *generic.CloudEventAgentClient[*eventv1.Event]
    cloudEventsClient generic.CloudEventsClient[*eventv1.Event]
    namespace string
}

func NewEventClient(cloudEventsClient *generic.CloudEventAgentClient[*eventv1.Event]) *EventClient {
func NewEventClient(cloudEventsClient generic.CloudEventsClient[*eventv1.Event]) *EventClient {
    return &EventClient{
        cloudEventsClient: cloudEventsClient,
    }
2 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/lease/client.go (generated, vendored)
@@ -20,7 +20,7 @@ import (
)

type LeaseClient struct {
    cloudEventsClient *generic.CloudEventAgentClient[*coordinationv1.Lease]
    cloudEventsClient generic.CloudEventsClient[*coordinationv1.Lease]
    watcherStore store.ClientWatcherStore[*coordinationv1.Lease]
    namespace string
}
32 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/options/generic.go (generated, vendored)
@@ -9,6 +9,8 @@ import (
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/statushash"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/clients"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
)

@@ -31,8 +33,6 @@ type GenericClientOptions[T generic.ResourceObject] struct {
//
// GRPCOptions (*grpc.GRPCOptions): builds a generic cloudevents client with GRPC
//
// KafkaOptions (*kafka.KafkaOptions): builds a generic cloudevents client with Kafka
//
// - codec, the codec for resource
//
// - clientID, the client ID for generic cloudevents client.
@@ -98,7 +98,9 @@ func (o *GenericClientOptions[T]) WatcherStore() store.ClientWatcherStore[T] {
    return o.watcherStore
}

func (o *GenericClientOptions[T]) AgentClient(ctx context.Context) (*generic.CloudEventAgentClient[T], error) {
func (o *GenericClientOptions[T]) AgentClient(ctx context.Context) (generic.CloudEventsClient[T], error) {
    logger := klog.FromContext(ctx)

    if len(o.clientID) == 0 {
        return nil, fmt.Errorf("client id is required")
    }
@@ -111,12 +113,12 @@ func (o *GenericClientOptions[T]) AgentClient(ctx context.Context) (*generic.Clo
        o.watcherStore = store.NewAgentInformerWatcherStore[T]()
    }

    options, err := generic.BuildCloudEventsAgentOptions(o.config, o.clusterName, o.clientID)
    options, err := builder.BuildCloudEventsAgentOptions(o.config, o.clusterName, o.clientID, o.codec.EventDataType())
    if err != nil {
        return nil, err
    }

    cloudEventsClient, err := generic.NewCloudEventAgentClient(
    cloudEventsClient, err := clients.NewCloudEventAgentClient(
        ctx,
        options,
        store.NewAgentWatcherStoreLister(o.watcherStore),
@@ -140,14 +142,14 @@ func (o *GenericClientOptions[T]) AgentClient(ctx context.Context) (*generic.Clo
            return
        case <-cloudEventsClient.ReconnectedChan():
            if !o.resync {
                klog.V(4).Infof("resync is disabled, do nothing")
                logger.Info("resync is disabled, do nothing")
                continue
            }

            // when receiving a client reconnected signal, we resync all sources for this agent
            // TODO after supporting multiple sources, we should only resync agent known sources
            if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil {
                klog.Errorf("failed to send resync request, %v", err)
                logger.Error(err, "failed to send resync request")
            }
        }
    }
@@ -161,7 +163,7 @@ func (o *GenericClientOptions[T]) AgentClient(ctx context.Context) (*generic.Clo
    go func() {
        if store.WaitForStoreInit(ctx, o.watcherStore.HasInitiated) {
            if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil {
                klog.Errorf("failed to send resync request, %v", err)
                logger.Error(err, "failed to send resync request")
            }
        }
    }()
@@ -169,7 +171,9 @@ func (o *GenericClientOptions[T]) AgentClient(ctx context.Context) (*generic.Clo
    return cloudEventsClient, nil
}

func (o *GenericClientOptions[T]) SourceClient(ctx context.Context) (*generic.CloudEventSourceClient[T], error) {
func (o *GenericClientOptions[T]) SourceClient(ctx context.Context) (generic.CloudEventsClient[T], error) {
    logger := klog.FromContext(ctx)

    if len(o.clientID) == 0 {
        return nil, fmt.Errorf("client id is required")
    }
@@ -182,12 +186,12 @@ func (o *GenericClientOptions[T]) SourceClient(ctx context.Context) (*generic.Cl
        return nil, fmt.Errorf("a watcher store is required")
    }

    options, err := generic.BuildCloudEventsSourceOptions(o.config, o.clientID, o.sourceID)
    options, err := builder.BuildCloudEventsSourceOptions(o.config, o.clientID, o.sourceID, o.codec.EventDataType())
    if err != nil {
        return nil, err
    }

    cloudEventsClient, err := generic.NewCloudEventSourceClient(
    cloudEventsClient, err := clients.NewCloudEventSourceClient(
        ctx,
        options,
        store.NewSourceWatcherStoreLister(o.watcherStore),
@@ -211,13 +215,13 @@ func (o *GenericClientOptions[T]) SourceClient(ctx context.Context) (*generic.Cl
            return
        case <-cloudEventsClient.ReconnectedChan():
            if !o.resync {
                klog.V(4).Infof("resync is disabled, do nothing")
                logger.Info("resync is disabled, do nothing")
                continue
            }

            // when receiving a client reconnected signal, we resync all clusters for this source
            if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil {
                klog.Errorf("failed to send resync request, %v", err)
                logger.Error(err, "failed to send resync request")
            }
        }
    }
@@ -231,7 +235,7 @@ func (o *GenericClientOptions[T]) SourceClient(ctx context.Context) (*generic.Cl
    go func() {
        if store.WaitForStoreInit(ctx, o.watcherStore.HasInitiated) {
            if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil {
                klog.Errorf("failed to send resync request, %v", err)
                logger.Error(err, "failed to send resync request")
            }
        }
    }()
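Putting the options together on the consumer side might look like the following; the constructor and builder methods are assumptions, since only AgentClient and WatcherStore appear in this diff:

// Hypothetical wiring of GenericClientOptions for a work agent.
opts := options.NewGenericClientOptions(mqttOptions, workCodec, clientID).
    WithClusterName(clusterName)
workClient, err := opts.AgentClient(ctx) // now returns the generic.CloudEventsClient[T] interface
if err != nil {
    return err
}
_ = opts.WatcherStore() // defaults to an AgentInformerWatcherStore when unset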
57 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store/informer.go (generated, vendored)
@@ -1,6 +1,7 @@
package store

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
@@ -8,8 +9,6 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
    "k8s.io/klog/v2"

    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/utils"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
@@ -17,40 +16,40 @@

// AgentInformerWatcherStore extends the BaseClientWatchStore.

// It gets/lists the resources from the given informer store and sends
// It gets/lists the resources from the given local store and sends
// the resource add/update/delete event to the watch channel directly.
//
// It is used for building a resource agent client.
type AgentInformerWatcherStore[T generic.ResourceObject] struct {
    BaseClientWatchStore[T]
    Watcher *Watcher

    informer cache.SharedIndexInformer
}

func NewAgentInformerWatcherStore[T generic.ResourceObject]() *AgentInformerWatcherStore[T] {
    return &AgentInformerWatcherStore[T]{
        BaseClientWatchStore: BaseClientWatchStore[T]{},
        Watcher: NewWatcher(),
        BaseClientWatchStore: BaseClientWatchStore[T]{
            Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
        },
        Watcher: NewWatcher(),
    }
}

func (s *AgentInformerWatcherStore[T]) Add(resource runtime.Object) error {
    s.Watcher.Receive(watch.Event{Type: watch.Added, Object: resource})
    return nil
    return s.Store.Add(resource)
}

func (s *AgentInformerWatcherStore[T]) Update(resource runtime.Object) error {
    s.Watcher.Receive(watch.Event{Type: watch.Modified, Object: resource})
    return nil
    return s.Store.Update(resource)
}

func (s *AgentInformerWatcherStore[T]) Delete(resource runtime.Object) error {
    s.Watcher.Receive(watch.Event{Type: watch.Deleted, Object: resource})
    return nil
    return s.Store.Delete(resource)
}

func (s *AgentInformerWatcherStore[T]) HandleReceivedResource(action types.ResourceAction, resource T) error {
func (s *AgentInformerWatcherStore[T]) HandleReceivedResource(ctx context.Context, action types.ResourceAction, resource T) error {
    switch action {
    case types.Added:
        newObj, err := utils.ToRuntimeObject(resource)
@@ -60,23 +59,22 @@ func (s *AgentInformerWatcherStore[T]) HandleReceivedResource(action types.Resou

        return s.Add(newObj)
    case types.Modified:
        newObj, err := meta.Accessor(resource)
        accessor, err := meta.Accessor(resource)
        if err != nil {
            return err
        }

        lastObj, exists, err := s.Get(newObj.GetNamespace(), newObj.GetName())
        lastObj, exists, err := s.Get(accessor.GetNamespace(), accessor.GetName())
        if err != nil {
            return err
        }
        if !exists {
            return fmt.Errorf("the resource %s/%s does not exist", newObj.GetNamespace(), newObj.GetName())
            return fmt.Errorf("the resource %s/%s does not exist", accessor.GetNamespace(), accessor.GetName())
        }

        // prevent the resource from being updated if it is deleting
        // if resource is deleting, keep the deletion timestamp
        if !lastObj.GetDeletionTimestamp().IsZero() {
            klog.Warningf("the resource %s/%s is deleting, ignore the update", newObj.GetNamespace(), newObj.GetName())
            return nil
            accessor.SetDeletionTimestamp(lastObj.GetDeletionTimestamp())
        }

        updated, err := utils.ToRuntimeObject(resource)
@@ -95,10 +93,6 @@ func (s *AgentInformerWatcherStore[T]) HandleReceivedResource(action types.Resou
            return nil
        }

        if len(newObj.GetFinalizers()) != 0 {
            return nil
        }

        last, exists, err := s.Get(newObj.GetNamespace(), newObj.GetName())
        if err != nil {
            return err
@@ -112,6 +106,19 @@ func (s *AgentInformerWatcherStore[T]) HandleReceivedResource(action types.Resou
            return err
        }

        // trigger an update event if the object is deleting.
        // Only need to update generation/finalizer/deletionTimeStamp of the object.
        if len(newObj.GetFinalizers()) != 0 {
            accessor, err := meta.Accessor(deletingObj)
            if err != nil {
                return err
            }
            accessor.SetDeletionTimestamp(newObj.GetDeletionTimestamp())
            accessor.SetFinalizers(newObj.GetFinalizers())
            accessor.SetGeneration(newObj.GetGeneration())
            return s.Update(deletingObj)
        }

        return s.Delete(deletingObj)
    default:
        return fmt.Errorf("unsupported resource action %s", action)
@@ -123,11 +130,5 @@ func (s *AgentInformerWatcherStore[T]) GetWatcher(namespace string, opts metav1.
}

func (s *AgentInformerWatcherStore[T]) HasInitiated() bool {
    return s.Initiated && s.informer.HasSynced()
}

func (s *AgentInformerWatcherStore[T]) SetInformer(informer cache.SharedIndexInformer) {
    s.informer = informer
    s.Store = informer.GetStore()
    s.Initiated = true
    return true
}
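The reworked Modified/Deleted handling above preserves deletion metadata instead of dropping events outright. The core move, copying the deletion fields through meta.Accessor, looks like this in isolation (cachedObj and incoming are stand-ins for the stored copy and the received resource):

// Propagate deletion metadata onto the cached copy rather than
// discarding the event while finalizers are still pending.
acc, err := meta.Accessor(cachedObj)
if err != nil {
    return err
}
acc.SetDeletionTimestamp(incoming.GetDeletionTimestamp())
acc.SetFinalizers(incoming.GetFinalizers())
acc.SetGeneration(incoming.GetGeneration())
return store.Update(cachedObj)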
5 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store/interface.go (generated, vendored)
@@ -34,7 +34,7 @@ type ClientWatcherStore[T generic.ResourceObject] interface {
    GetWatcher(namespace string, opts metav1.ListOptions) (watch.Interface, error)

    // HandleReceivedResource handles the client received resource events.
    HandleReceivedResource(action types.ResourceAction, resource T) error
    HandleReceivedResource(ctx context.Context, action types.ResourceAction, resource T) error

    // Add will be called by resource client when adding resources. The implementation is based on the specific
    // watcher store; in some cases, it does not need to update a store, but just sends a watch event.
@@ -63,6 +63,7 @@ type ClientWatcherStore[T generic.ResourceObject] interface {
}

func WaitForStoreInit(ctx context.Context, cacheSyncs ...StoreInitiated) bool {
    logger := klog.FromContext(ctx)
    err := wait.PollUntilContextCancel(
        ctx,
        syncedPollPeriod,
@@ -77,7 +78,7 @@ func WaitForStoreInit(ctx context.Context, cacheSyncs ...StoreInitiated) bool {
        },
    )
    if err != nil {
        klog.Errorf("stop WaitForStoreInit, %v", err)
        logger.Error(err, "stop WaitForStoreInit")
        return false
    }
@@ -1,6 +1,7 @@
package store

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
@@ -50,7 +51,9 @@ func (s *SimpleStore[T]) HasInitiated() bool {
    return true
}

func (s *SimpleStore[T]) HandleReceivedResource(action types.ResourceAction, resource T) error {
func (s *SimpleStore[T]) HandleReceivedResource(ctx context.Context, action types.ResourceAction, resource T) error {
    logger := klog.FromContext(ctx)

    switch action {
    case types.Added:
        newObj, err := utils.ToRuntimeObject(resource)
@@ -75,7 +78,8 @@ func (s *SimpleStore[T]) HandleReceivedResource(action types.ResourceAction, res

        // prevent the resource from being updated if it is deleting
        if !lastObj.GetDeletionTimestamp().IsZero() {
            klog.Warningf("the resource %s/%s is deleting, ignore the update", newObj.GetNamespace(), newObj.GetName())
            logger.Info("the resource is deleting, ignore the update",
                "resourceNamespace", newObj.GetNamespace(), "resourceName", newObj.GetName())
            return nil
        }
@@ -3,12 +3,12 @@ package client
import (
    "context"
    "fmt"
    "k8s.io/apimachinery/pkg/api/meta"
    "net/http"
    "strconv"
    "sync"

    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kubetypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/watch"
@@ -22,6 +22,7 @@ import (
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/utils"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
)

@@ -30,7 +31,7 @@ import (
type ManifestWorkAgentClient struct {
    sync.RWMutex

    cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork]
    cloudEventsClient generic.CloudEventsClient[*workv1.ManifestWork]
    watcherStore store.ClientWatcherStore[*workv1.ManifestWork]

    // this namespace should be the same as the cluster name to which this client subscribes
@@ -40,9 +41,9 @@ type ManifestWorkAgentClient struct {
var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{}

func NewManifestWorkAgentClient(
    clusterName string,
    _ string,
    watcherStore store.ClientWatcherStore[*workv1.ManifestWork],
    cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork],
    cloudEventsClient generic.CloudEventsClient[*workv1.ManifestWork],
) *ManifestWorkAgentClient {
    return &ManifestWorkAgentClient{
        cloudEventsClient: cloudEventsClient,
@@ -75,33 +76,36 @@ func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts met
}

func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) {
    klog.V(4).Infof("getting manifestwork %s/%s", c.namespace, name)
    logger := klog.FromContext(ctx)

    logger.V(4).Info("getting manifestwork", "manifestWorkNamespace", c.namespace, "manifestWorkName", name)
    work, exists, err := c.watcherStore.Get(c.namespace, name)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }
    if !exists {
        returnErr := errors.NewNotFound(common.ManifestWorkGR, name)
        generic.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("get", metav1.StatusSuccess)
    metrics.IncreaseWorkProcessedCounter("get", metav1.StatusSuccess)
    return work, nil
}

func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) {
    klog.V(4).Infof("list manifestworks from cluster %s", c.namespace)
    logger := klog.FromContext(ctx)
    logger.V(4).Info("list manifestworks from cluster", "cluster", c.namespace)
    works, err := c.watcherStore.List(c.namespace, opts)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("list", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("list", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("list", metav1.StatusSuccess)
    metrics.IncreaseWorkProcessedCounter("list", metav1.StatusSuccess)
    items := []workv1.ManifestWork{}
    for _, work := range works.Items {
        items = append(items, *work)
@@ -111,152 +115,164 @@ func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOpti
}

func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
    klog.V(4).Infof("watch manifestworks from cluster %s", c.namespace)
    logger := klog.FromContext(ctx)
    logger.V(4).Info("watch manifestworks from cluster", "cluster", c.namespace)
    watcher, err := c.watcherStore.GetWatcher(c.namespace, opts)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("watch", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("watch", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("watch", metav1.StatusSuccess)
    metrics.IncreaseWorkProcessedCounter("watch", metav1.StatusSuccess)
    return watcher, nil
}

func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) {
    klog.V(4).Infof("patching manifestwork %s/%s", c.namespace, name)
    logger := klog.FromContext(ctx)
    logger.V(4).Info("patching manifestwork", "manifestWorkNamespace", c.namespace, "manifestWorkName", name)

    // avoid race conditions among the agent's go routines
    c.Lock()
    defer c.Unlock()

    var returnErr *errors.StatusError
    defer func() {
        if returnErr != nil {
            metrics.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        } else {
            metrics.IncreaseWorkProcessedCounter("patch", metav1.StatusSuccess)
        }
    }()

    if len(subresources) != 0 && !utils.IsStatusPatch(subresources) {
        msg := fmt.Sprintf("unsupported subresources %v", subresources)
        returnErr = errors.NewGenericServerResponse(http.StatusMethodNotAllowed, "patch", common.ManifestWorkGR, name, msg, 0, false)
        return nil, returnErr
    }

    lastWork, exists, err := c.watcherStore.Get(c.namespace, name)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    if !exists {
        returnErr := errors.NewNotFound(common.ManifestWorkGR, name)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewNotFound(common.ManifestWorkGR, name)
        return nil, returnErr
    }

    patchedWork, err := utils.Patch(pt, lastWork, data)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }

    eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[common.CloudEventsDataTypeAnnotationKey])
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    eventType := types.CloudEventsType{
        CloudEventsDataType: *eventDataType,
        SubResource: types.SubResourceStatus,
        Action: types.UpdateRequestAction,
    }

    if returnErr = versionCompare(patchedWork, lastWork); returnErr != nil {
        return nil, returnErr
    }

    newWork := patchedWork.DeepCopy()

    if utils.IsStatusPatch(subresources) {
    // avoid race conditions among the agent's go routines
    c.Lock()
    defer c.Unlock()
    isDeleted := !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0

    if utils.IsStatusPatch(subresources) || isDeleted {
        if isDeleted {
            meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{
                Type: common.ResourceDeleted,
                Status: metav1.ConditionTrue,
                Reason: "ManifestsDeleted",
                Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace),
            })
        }

        // Set work's resource version to remote resource version for publishing
        workToPublish := newWork.DeepCopy()
        workToPublish.ResourceVersion = ""

        eventType.Action = types.UpdateRequestAction
        // publish the status update event to source, source will check the resource version
        // and reject the update if its status update is outdated.
        if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil {
            returnErr := cloudeventserrors.ToStatusError(common.ManifestWorkGR, name, err)
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        if err := c.cloudEventsClient.Publish(ctx, eventType, workToPublish); err != nil {
            returnErr = cloudeventserrors.ToStatusError(common.ManifestWorkGR, name, err)
            return nil, returnErr
        }

        // Fetch the latest work from the store and verify the resource version to avoid updating the store
        // with outdated work. Return a conflict error if the resource version is outdated.
        // Due to the lack of read-modify-write guarantees in the store, race conditions may occur between
        // this update operation and one from the agent informer after receiving the event from the source.
        latestWork, exists, err := c.watcherStore.Get(c.namespace, name)
        if err != nil {
            returnErr := errors.NewInternalError(err)
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }
        if !exists {
            returnErr := errors.NewNotFound(common.ManifestWorkGR, name)
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }
        lastResourceVersion, err := strconv.ParseInt(latestWork.GetResourceVersion(), 10, 64)
        if err != nil {
            returnErr := errors.NewInternalError(err)
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }
        newResourceVersion, err := strconv.ParseInt(newWork.GetResourceVersion(), 10, 64)
        if err != nil {
            returnErr := errors.NewInternalError(err)
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }
        // ensure the resource version of the work is not outdated
        if newResourceVersion < lastResourceVersion {
            // It's safe to return a conflict error here, even if the status update event
            // has already been sent. The source may reject the update due to an outdated resource version.
            returnErr := errors.NewConflict(common.ManifestWorkGR, name, fmt.Errorf("the resource version of the work is outdated"))
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }
        if err := c.watcherStore.Update(newWork); err != nil {
            returnErr := errors.NewInternalError(err)
            generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }

        generic.IncreaseWorkProcessedCounter("patch", metav1.StatusSuccess)
        return newWork, nil
    }

    if len(subresources) != 0 {
        msg := fmt.Sprintf("unsupported subresources %v", subresources)
        returnErr := errors.NewGenericServerResponse(http.StatusMethodNotAllowed, "patch", common.ManifestWorkGR, name, msg, 0, false)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    // the finalizers of a deleting manifestwork are removed, marking the manifestwork status to deleted and sending
    // it back to source
    if !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0 {
        meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{
            Type: common.ResourceDeleted,
            Status: metav1.ConditionTrue,
            Reason: "ManifestsDeleted",
            Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace),
        })

        eventType.Action = types.UpdateRequestAction
        if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil {
            returnErr := cloudeventserrors.ToStatusError(common.ManifestWorkGR, name, err)
            generic.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }

        if isDeleted {
        if err := c.watcherStore.Delete(newWork); err != nil {
            returnErr := errors.NewInternalError(err)
            generic.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
            metrics.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
            return nil, returnErr
        }

        generic.IncreaseWorkProcessedCounter("delete", metav1.StatusSuccess)
        metrics.IncreaseWorkProcessedCounter("delete", metav1.StatusSuccess)
        return newWork, nil
    }

    if err := c.watcherStore.Update(newWork); err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
    // Fetch the latest work from the store and verify the resource version to avoid updating the store
    // with outdated work. Return a conflict error if the resource version is outdated.
    // Due to the lack of read-modify-write guarantees in the store, race conditions may occur between
    // this update operation and one from the agent informer after receiving the event from the source.
    latestWork, exists, err := c.watcherStore.Get(c.namespace, name)
    if err != nil {
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    if !exists {
        returnErr = errors.NewNotFound(common.ManifestWorkGR, name)
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("patch", metav1.StatusSuccess)
    if returnErr = versionCompare(patchedWork, latestWork); returnErr != nil {
        return nil, returnErr
    }
    if err := c.watcherStore.Update(newWork); err != nil {
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    return newWork, nil
}

func versionCompare(new, old *workv1.ManifestWork) *errors.StatusError {
    // Resource version 0 means force conflict.
    if new.GetResourceVersion() == "0" {
        return nil
    }

    if new.GetResourceVersion() == "" {
        return errors.NewConflict(common.ManifestWorkGR, new.Name, fmt.Errorf(
            "the resource version of the work cannot be empty"))
    }

    lastResourceVersion, err := strconv.ParseInt(old.GetResourceVersion(), 10, 64)
    if err != nil {
        return errors.NewInternalError(err)
    }
    newResourceVersion, err := strconv.ParseInt(new.GetResourceVersion(), 10, 64)
    if err != nil {
        return errors.NewInternalError(err)
    }

    // ensure the resource version of the work is not outdated
    if newResourceVersion < lastResourceVersion {
        // It's safe to return a conflict error here, even if the status update event
        // has already been sent. The source may reject the update due to an outdated resource version.
        return errors.NewConflict(common.ManifestWorkGR, new.Name, fmt.Errorf(
            "the resource version of the work is outdated, new %d, old %d", newResourceVersion, lastResourceVersion))
    }
    return nil
}
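Worked through versionCompare's semantics: with the stored work at resource version "5", a patched work at "4" yields a Conflict, "5" or higher passes, "0" always passes (force), and "" is rejected as empty. A small illustrative sketch using the names above:

// Illustration only: exercise versionCompare against a stored work.
old := &workv1.ManifestWork{}
old.ResourceVersion = "5"
for _, rv := range []string{"4", "5", "0", ""} {
    nw := &workv1.ManifestWork{}
    nw.ResourceVersion = rv
    fmt.Println(rv, versionCompare(nw, old) != nil) // "4" true, "5" false, "0" false, "" true
}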
@@ -3,8 +3,6 @@ package codec
import (
    "encoding/json"
    "fmt"
    "strconv"

    "github.com/bwmarrin/snowflake"
    cloudevents "github.com/cloudevents/sdk-go/v2"
    cloudeventstypes "github.com/cloudevents/sdk-go/v2/types"
@@ -50,11 +48,6 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT
        return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType)
    }

    resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64)
    if err != nil {
        return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err)
    }

    originalSource, ok := work.Labels[common.CloudEventsOriginalSourceLabelKey]
    if !ok {
        return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID)
@@ -63,7 +56,7 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT
    evt := types.NewEventBuilder(source, eventType).
        WithResourceID(string(work.UID)).
        WithStatusUpdateSequenceID(sequenceGenerator.Generate().String()).
        WithResourceVersion(resourceVersion).
        WithResourceVersion(work.Generation).
        WithClusterName(work.Namespace).
        WithOriginalSource(originalSource).
        NewEvent()
@@ -139,8 +132,10 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo
        metaObj.Name = resourceID
    }
    metaObj.Namespace = clusterName
    metaObj.ResourceVersion = fmt.Sprintf("%d", resourceVersion)
    // if generation is not set, set it the same as resourceVersion
    // This is explicitly set to empty since it will be managed by local client.
    metaObj.ResourceVersion = ""
    // The resourceVersion in cloudevent actually semantically equals generation, since it increments when
    // spec changes
    if metaObj.Generation == 0 {
        metaObj.Generation = int64(resourceVersion)
    }
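The net effect of this codec change: the wire-level resource version now carries the work's generation, and the decoded object's ResourceVersion is left for the local client to manage. An illustrative lifecycle (values assumed, not taken from the diff):

// create work        -> generation 1 -> event resource version 1
// update work spec   -> generation 2 -> event resource version 2
// update status only -> generation 2 -> event resource version 2 (unchanged)
// On decode: ResourceVersion is cleared; generation is backfilled from
// the event's resource version only when it is unset.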
@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "net/http"
    "strconv"

    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -21,12 +22,13 @@ import (
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/utils"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/payload"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
)

// ManifestWorkSourceClient implements the ManifestWorkInterface.
type ManifestWorkSourceClient struct {
    cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork]
    cloudEventsClient generic.CloudEventsClient[*workv1.ManifestWork]
    watcherStore store.ClientWatcherStore[*workv1.ManifestWork]
    namespace string
    sourceID string
@@ -37,7 +39,7 @@ var _ workv1client.ManifestWorkInterface = &ManifestWorkSourceClient{}
func NewManifestWorkSourceClient(
    sourceID string,
    watcherStore store.ClientWatcherStore[*workv1.ManifestWork],
    cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork],
    cloudEventsClient generic.CloudEventsClient[*workv1.ManifestWork],
) *ManifestWorkSourceClient {
    return &ManifestWorkSourceClient{
        cloudEventsClient: cloudEventsClient,
@@ -51,27 +53,34 @@ func (c *ManifestWorkSourceClient) SetNamespace(namespace string) {
}

func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) {
    var returnErr *errors.StatusError

    defer func() {
        if returnErr != nil {
            metrics.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        } else {
            metrics.IncreaseWorkProcessedCounter("create", metav1.StatusSuccess)
        }
    }()

    if manifestWork.Namespace != "" && manifestWork.Namespace != c.namespace {
        returnErr := errors.NewInvalid(common.ManifestWorkGK, manifestWork.Name, field.ErrorList{
        returnErr = errors.NewInvalid(common.ManifestWorkGK, manifestWork.Name, field.ErrorList{
            field.Invalid(
                field.NewPath("metadata").Child("namespace"),
                manifestWork.Namespace,
                fmt.Sprintf("does not match the namespace %s", c.namespace),
            ),
        })
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    _, exists, err := c.watcherStore.Get(c.namespace, manifestWork.Name)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    if exists {
        returnErr := errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name)
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name)
        return nil, returnErr
    }

@@ -86,34 +95,36 @@ func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *wor
    newWork := manifestWork.DeepCopy()
    newWork.UID = kubetypes.UID(utils.UID(c.sourceID, common.ManifestWorkGR.String(), c.namespace, newWork.Name))
    newWork.Namespace = c.namespace
    newWork.ResourceVersion = getWorkResourceVersion(manifestWork)

    rv, generation, err := getWorkResourceVersion(manifestWork)
    if err != nil {
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    newWork.Generation = generation
    newWork.ResourceVersion = rv

    if err := utils.EncodeManifests(newWork); err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }

    if errs := utils.ValidateWork(newWork); len(errs) != 0 {
        returnErr := errors.NewInvalid(common.ManifestWorkGK, manifestWork.Name, errs)
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInvalid(common.ManifestWorkGK, manifestWork.Name, errs)
        return nil, returnErr
    }

    if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil {
        returnErr := cloudeventserrors.ToStatusError(common.ManifestWorkGR, manifestWork.Name, err)
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        returnErr = cloudeventserrors.ToStatusError(common.ManifestWorkGR, manifestWork.Name, err)
        return nil, returnErr
    }

    // add the new work to the local cache.
    if err := c.watcherStore.Add(newWork); err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("create", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("create", metav1.StatusSuccess)
    return newWork.DeepCopy(), nil
}

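The Create path above replaces a counter call on every return branch with a single deferred metrics emission keyed off a named returnErr variable. A stdlib-only sketch of that pattern (recordMetric is a hypothetical stand-in for metrics.IncreaseWorkProcessedCounter):

package main

import (
    "errors"
    "fmt"
)

// recordMetric is a hypothetical stand-in for the metrics counter.
func recordMetric(action, code string) { fmt.Println("metric:", action, code) }

func create(fail bool) (err error) {
    // One deferred emission replaces a counter call on every return branch.
    defer func() {
        if err != nil {
            recordMetric("create", err.Error())
        } else {
            recordMetric("create", "Success")
        }
    }()

    if fail {
        err = errors.New("AlreadyExists")
        return err
    }
    return nil
}

func main() {
    _ = create(false) // metric: create Success
    _ = create(true)  // metric: create AlreadyExists
}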
@@ -129,7 +140,7 @@ func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts
    work, exists, err := c.watcherStore.Get(c.namespace, name)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
        return returnErr
    }
    if !exists {
@@ -150,7 +161,7 @@ func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts

    if err := c.cloudEventsClient.Publish(ctx, eventType, deletingWork); err != nil {
        returnErr := cloudeventserrors.ToStatusError(common.ManifestWorkGR, name, err)
        generic.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
        return returnErr
    }

@@ -161,18 +172,18 @@ func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts
        // after the deleted status is back, we need ignore this work in the ManifestWorkSourceHandler.
        if err := c.watcherStore.Delete(deletingWork); err != nil {
            returnErr := errors.NewInternalError(err)
            generic.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
            metrics.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
            return returnErr
        }

        generic.IncreaseWorkProcessedCounter("delete", metav1.StatusSuccess)
        metrics.IncreaseWorkProcessedCounter("delete", metav1.StatusSuccess)
        return nil
    }

    // update the work with deletion timestamp in the local cache.
    if err := c.watcherStore.Update(deletingWork); err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("delete", string(returnErr.ErrStatus.Reason))
        return returnErr
    }

@@ -184,33 +195,35 @@ func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts me
}

func (c *ManifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) {
    klog.V(4).Infof("getting manifestwork %s", name)
    logger := klog.FromContext(ctx)
    logger.V(4).Info("getting manifestwork", "manifestWorkName", name)
    work, exists, err := c.watcherStore.Get(c.namespace, name)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }
    if !exists {
        returnErr := errors.NewNotFound(common.ManifestWorkGR, name)
        generic.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("get", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("get", metav1.StatusSuccess)
    metrics.IncreaseWorkProcessedCounter("get", metav1.StatusSuccess)
    return work, nil
}

func (c *ManifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) {
    klog.V(4).Infof("list manifestworks")
    logger := klog.FromContext(ctx)
    logger.V(4).Info("list manifestworks")
    works, err := c.watcherStore.List(c.namespace, opts)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("list", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("list", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("list", metav1.StatusSuccess)
    metrics.IncreaseWorkProcessedCounter("list", metav1.StatusSuccess)
    items := []workv1.ManifestWork{}
    for _, work := range works.Items {
        items = append(items, *work)
@@ -223,40 +236,46 @@ func (c *ManifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOp
    watcher, err := c.watcherStore.GetWatcher(c.namespace, opts)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("watch", string(returnErr.ErrStatus.Reason))
        metrics.IncreaseWorkProcessedCounter("watch", string(returnErr.ErrStatus.Reason))
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("watch", metav1.StatusSuccess)
    metrics.IncreaseWorkProcessedCounter("watch", metav1.StatusSuccess)
    return watcher, nil
}

func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) {
    klog.V(4).Infof("patching manifestwork %s", name)
    logger := klog.FromContext(ctx)
    logger.V(4).Info("patching manifestwork", "manifestWorkName", name)

    var returnErr *errors.StatusError
    defer func() {
        if returnErr != nil {
            metrics.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        } else {
            metrics.IncreaseWorkProcessedCounter("patch", metav1.StatusSuccess)
        }
    }()

    if len(subresources) != 0 {
        msg := fmt.Sprintf("unsupported to update subresources %v", subresources)
        returnErr := errors.NewGenericServerResponse(http.StatusMethodNotAllowed, "patch", common.ManifestWorkGR, name, msg, 0, false)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewGenericServerResponse(http.StatusMethodNotAllowed, "patch", common.ManifestWorkGR, name, msg, 0, false)
        return nil, returnErr
    }

    lastWork, exists, err := c.watcherStore.Get(c.namespace, name)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    if !exists {
        returnErr := errors.NewNotFound(common.ManifestWorkGR, name)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewNotFound(common.ManifestWorkGR, name)
        return nil, returnErr
    }

    patchedWork, err := utils.Patch(pt, lastWork, data)
    if err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }

@@ -269,28 +288,30 @@ func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt ku
    }

    newWork := patchedWork.DeepCopy()
    newWork.ResourceVersion = getWorkResourceVersion(patchedWork)
    rv, generation, err := getWorkResourceVersion(patchedWork)
    if err != nil {
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }
    newWork.Generation = generation
    newWork.ResourceVersion = rv

    if errs := utils.ValidateWork(newWork); len(errs) != 0 {
        returnErr := errors.NewInvalid(common.ManifestWorkGK, name, errs)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInvalid(common.ManifestWorkGK, name, errs)
        return nil, returnErr
    }

    if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil {
        returnErr := cloudeventserrors.ToStatusError(common.ManifestWorkGR, name, err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = cloudeventserrors.ToStatusError(common.ManifestWorkGR, name, err)
        return nil, returnErr
    }

    // modify the updated work in the local cache.
    if err := c.watcherStore.Update(newWork); err != nil {
        returnErr := errors.NewInternalError(err)
        generic.IncreaseWorkProcessedCounter("patch", string(returnErr.ErrStatus.Reason))
        returnErr = errors.NewInternalError(err)
        return nil, returnErr
    }

    generic.IncreaseWorkProcessedCounter("patch", metav1.StatusSuccess)
    return newWork.DeepCopy(), nil
}

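Get, List, and Patch above also switch from klog.V(4).Infof format strings to context-scoped structured logging. A minimal runnable sketch of that idiom, assuming only k8s.io/klog/v2 (the logger name and work name are illustrative):

package main

import (
    "context"

    "k8s.io/klog/v2"
)

func getWork(ctx context.Context, name string) {
    // Pull the logger that was attached to the context (or the global fallback).
    logger := klog.FromContext(ctx)
    // Key/value pairs replace format strings so log processors can index them.
    logger.V(4).Info("getting manifestwork", "manifestWorkName", name)
}

func main() {
    // Attach a named logger to the context so callees inherit it.
    ctx := klog.NewContext(context.Background(), klog.Background().WithName("source-client"))
    getWork(ctx, "example-work")
}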
@@ -299,15 +320,29 @@ func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt ku
// firstly, if no annotation is set, we will get the the resource version from work itself,
// if the wok does not have it, "0" will be returned, which means the version of the work
// will not be maintained on source, the message broker guarantees the work update order.
func getWorkResourceVersion(work *workv1.ManifestWork) string {
func getWorkResourceVersion(work *workv1.ManifestWork) (string, int64, error) {
    var generation int64
    var err error

    resourceVersion, ok := work.Annotations[common.CloudEventsResourceVersionAnnotationKey]
    if ok {
        return resourceVersion
        generation, err = strconv.ParseInt(resourceVersion, 10, 64)
        if err != nil {
            return "", 0, errors.NewInternalError(err)
        }
    }

    if work.ResourceVersion != "" {
        return work.ResourceVersion
    if generation == 0 {
        generation = work.Generation
    }

    return "0"
    if len(resourceVersion) == 0 {
        if len(work.ResourceVersion) != 0 {
            resourceVersion = work.ResourceVersion
        } else {
            resourceVersion = "0"
        }
    }

    return resourceVersion, generation, nil
}

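getWorkResourceVersion now resolves a (resourceVersion, generation) pair instead of a single string: the annotation, if present, drives the generation, with fallbacks to the work's own fields. A stdlib-only sketch of that resolution order (the Work struct and annotation key are illustrative stand-ins, not the sdk-go definitions):

package main

import (
    "fmt"
    "strconv"
)

type Work struct {
    Annotations     map[string]string
    ResourceVersion string
    Generation      int64
}

// versionAnnotation is an assumed placeholder for common.CloudEventsResourceVersionAnnotationKey.
const versionAnnotation = "cloudevents.example/resourceversion"

func resolveVersion(w Work) (string, int64, error) {
    var generation int64

    // 1. An explicit annotation wins and must parse as an int64 generation.
    rv, ok := w.Annotations[versionAnnotation]
    if ok {
        g, err := strconv.ParseInt(rv, 10, 64)
        if err != nil {
            return "", 0, err
        }
        generation = g
    }

    // 2. Otherwise fall back to the work's own generation.
    if generation == 0 {
        generation = w.Generation
    }

    // 3. The string version falls back to the work's resourceVersion, then "0",
    //    meaning the broker's message ordering is trusted instead of a source-side version.
    if len(rv) == 0 {
        if len(w.ResourceVersion) != 0 {
            rv = w.ResourceVersion
        } else {
            rv = "0"
        }
    }
    return rv, generation, nil
}

func main() {
    rv, gen, _ := resolveVersion(Work{Generation: 2})
    fmt.Println(rv, gen) // "0" 2
}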
@@ -3,8 +3,6 @@ package codec
import (
    "encoding/json"
    "fmt"
    "strconv"

    cloudevents "github.com/cloudevents/sdk-go/v2"
    cloudeventstypes "github.com/cloudevents/sdk-go/v2/types"

@@ -36,15 +34,10 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT
        return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType)
    }

    resourceVersion, err := strconv.Atoi(work.ResourceVersion)
    if err != nil {
        return nil, fmt.Errorf("failed to convert resource version %s to int: %v", work.ResourceVersion, err)
    }

    evt := types.NewEventBuilder(source, eventType).
        WithClusterName(work.Namespace).
        WithResourceID(string(work.UID)).
        WithResourceVersion(int64(resourceVersion)).
        WithResourceVersion(work.Generation).
        NewEvent()

    // set the work's meta data to its cloud event
@@ -120,7 +113,7 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo
    if len(metaObj.Name) == 0 {
        metaObj.Name = resourceID
    }
    metaObj.ResourceVersion = fmt.Sprintf("%d", resourceVersion)
    metaObj.Generation = int64(resourceVersion)
    if metaObj.Annotations == nil {
        metaObj.Annotations = map[string]string{}
    }

52 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/store/base.go generated vendored
@@ -1,8 +1,8 @@
package store

import (
    "context"
    "fmt"
    "strconv"
    "time"

    "k8s.io/apimachinery/pkg/api/equality"
@@ -27,7 +27,7 @@ type baseSourceStore struct {
    receivedWorks workqueue.TypedRateLimitingInterface[*workv1.ManifestWork]
}

func (bs *baseSourceStore) HandleReceivedResource(action types.ResourceAction, work *workv1.ManifestWork) error {
func (bs *baseSourceStore) HandleReceivedResource(_ context.Context, action types.ResourceAction, work *workv1.ManifestWork) error {
    switch action {
    case types.StatusModified:
        bs.receivedWorks.Add(work)
@@ -50,26 +50,26 @@ func newWorkProcessor(works workqueue.TypedRateLimitingInterface[*workv1.Manifes
    }
}

func (b *workProcessor) run(stopCh <-chan struct{}) {
func (b *workProcessor) run(ctx context.Context) {
    defer b.works.ShutDown()

    // start a goroutine to handle the works from the queue
    // the .Until will re-kick the runWorker one second after the runWorker completes
    go wait.Until(b.runWorker, time.Second, stopCh)
    go wait.UntilWithContext(ctx, b.runWorker, time.Second)

    // wait until we're told to stop
    <-stopCh
    <-ctx.Done()
}

func (b *workProcessor) runWorker() {
func (b *workProcessor) runWorker(ctx context.Context) {
    // hot loop until we're told to stop. processNextEvent will automatically wait until there's work available, so
    // we don't worry about secondary waits
    for b.processNextWork() {
    for b.processNextWork(ctx) {
    }
}

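The processor's stop channel is replaced with a context throughout; wait.UntilWithContext re-runs the worker until the context is cancelled. A small runnable sketch of that migration, assuming only k8s.io/apimachinery:

package main

import (
    "context"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    worker := func(ctx context.Context) {
        fmt.Println("processing queue at", time.Now().Format(time.RFC3339))
    }

    // Re-kicks worker one second after each completion, stopping on ctx.Done().
    go wait.UntilWithContext(ctx, worker, time.Second)

    // The caller blocks on the same context instead of a separate stop channel.
    <-ctx.Done()
}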
// processNextWork deals with one key off the queue.
func (b *workProcessor) processNextWork() bool {
func (b *workProcessor) processNextWork(ctx context.Context) bool {
    // pull the next event item from queue.
    // events queue blocks until it can return an item to be processed
    key, quit := b.works.Get()
@@ -79,7 +79,7 @@ func (b *workProcessor) processNextWork() bool {
    }
    defer b.works.Done(key)

    if err := b.handleWork(key); err != nil {
    if err := b.handleWork(ctx, key); err != nil {
        // we failed to handle the work, we should requeue the item to work on later
        // this method will add a backoff to avoid hotlooping on particular items
        b.works.AddRateLimited(key)
@@ -91,8 +91,9 @@ func (b *workProcessor) processNextWork() bool {
    return true
}

func (b *workProcessor) handleWork(work *workv1.ManifestWork) error {
    lastWork := b.getWork(work.UID)
func (b *workProcessor) handleWork(ctx context.Context, work *workv1.ManifestWork) error {
    logger := klog.FromContext(ctx).WithValues("manifestWorkNamespace", work.Namespace, "manifestWorkName", work.Name)
    lastWork := b.getWork(ctx, work.UID)
    if lastWork == nil {
        // the work is not found from the local cache and it has been deleted by the agent,
        // ignore this work.
@@ -114,22 +115,10 @@ func (b *workProcessor) handleWork(work *workv1.ManifestWork) error {
        return b.store.Delete(updatedWork)
    }

    lastResourceVersion, err := strconv.Atoi(lastWork.ResourceVersion)
    if err != nil {
        klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err)
        return nil
    }

    resourceVersion, err := strconv.Atoi(work.ResourceVersion)
    if err != nil {
        klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err)
        return nil
    }

    // the current work's version is maintained on source and the agent's work is newer than source, ignore
    if lastResourceVersion != 0 && resourceVersion > lastResourceVersion {
        klog.Warningf("the work %s/%s resource version %d is great than its generation %d, ignore",
            lastWork.Namespace, lastWork.Name, resourceVersion, lastResourceVersion)
    if lastWork.Generation != 0 && work.Generation > lastWork.Generation {
        logger.Info("the work generation is greater than its local generation, ignore",
            "localGeneration", lastWork.Generation, "remoteGeneration", work.Generation)
        return nil
    }

@@ -140,13 +129,13 @@ func (b *workProcessor) handleWork(work *workv1.ManifestWork) error {
    sequenceID := work.Annotations[common.CloudEventsSequenceIDAnnotationKey]
    greater, err := utils.CompareSnowflakeSequenceIDs(lastSequenceID, sequenceID)
    if err != nil {
        klog.Errorf("invalid sequenceID for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err)
        logger.Error(err, "invalid sequenceID for work")
        return nil
    }

    if !greater {
        klog.Warningf("the work %s/%s current sequenceID %s is less than its last %s, ignore",
            lastWork.Namespace, lastWork.Name, sequenceID, lastSequenceID)
        logger.Info("the work current sequenceID is less than its last, ignore",
            "currentSequenceID", sequenceID, "lastSequenceID", lastSequenceID)
        return nil
    }

@@ -163,10 +152,11 @@ func (b *workProcessor) handleWork(work *workv1.ManifestWork) error {
    return b.store.Update(updatedWork)
}

func (b *workProcessor) getWork(uid kubetypes.UID) *workv1.ManifestWork {
func (b *workProcessor) getWork(ctx context.Context, uid kubetypes.UID) *workv1.ManifestWork {
    logger := klog.FromContext(ctx)
    works, err := b.store.ListAll()
    if err != nil {
        klog.Errorf("failed to lists works, %v", err)
        logger.Error(err, "failed to lists works")
        return nil
    }


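handleWork now drops stale status updates by comparing generations first and then event sequence IDs, instead of parsing local resourceVersion strings. A stdlib sketch of that two-stage guard (the snowflake comparison is stubbed as a plain integer compare for illustration):

package main

import "fmt"

type work struct {
    Generation int64
    SequenceID int64 // stand-in for the snowflake sequence ID annotation
}

// shouldApply reports whether a received status update is newer than the cached work.
func shouldApply(last, incoming work) bool {
    // Stage 1: a status event for a generation the source has not produced yet
    // means the agent is ahead of this cache; ignore it.
    if last.Generation != 0 && incoming.Generation > last.Generation {
        return false
    }
    // Stage 2: within the same generation, the monotonic sequence ID orders events.
    return incoming.SequenceID > last.SequenceID
}

func main() {
    last := work{Generation: 2, SequenceID: 10}
    fmt.Println(shouldApply(last, work{Generation: 2, SequenceID: 11})) // true
    fmt.Println(shouldApply(last, work{Generation: 3, SequenceID: 12})) // false: agent ahead
    fmt.Println(shouldApply(last, work{Generation: 2, SequenceID: 9}))  // false: stale
}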
96 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/store/informer.go generated vendored
@@ -3,14 +3,15 @@ package store
import (
    "context"
    "fmt"
    "k8s.io/apimachinery/pkg/api/meta"
    "strconv"
    "sync"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog/v2"

    workv1 "open-cluster-management.io/api/work/v1"

    "open-cluster-management.io/sdk-go/pkg/cloudevents/clients/store"
@@ -43,7 +44,7 @@ func NewSourceInformerWatcherStore(ctx context.Context) *SourceInformerWatcherSt
    }

    // start a goroutine to process the received work events from the work queue with current store.
    go newWorkProcessor(s.receivedWorks, s).run(ctx.Done())
    go newWorkProcessor(s.receivedWorks, s).run(ctx)

    return s
}
@@ -88,6 +89,38 @@ func (s *SourceInformerWatcherStore) SetInformer(informer cache.SharedIndexInfor
// It is used for building ManifestWork agent client.
type AgentInformerWatcherStore struct {
    store.AgentInformerWatcherStore[*workv1.ManifestWork]

    versions *versioner
}

type versioner struct {
    versions map[string]int64
    lock sync.RWMutex
}

func newVersioner() *versioner {
    return &versioner{
        versions: make(map[string]int64),
    }
}

func (v *versioner) increment(name string) int64 {
    v.lock.Lock()
    defer v.lock.Unlock()

    if _, ok := v.versions[name]; !ok {
        v.versions[name] = 1
    } else {
        v.versions[name] = v.versions[name] + 1
    }

    return v.versions[name]
}

func (v *versioner) delete(name string) {
    v.lock.Lock()
    defer v.lock.Unlock()
    delete(v.versions, name)
}

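The new versioner hands each work a monotonically increasing local version under a lock, so the agent-side cache can fabricate resource versions without consulting the source. A stdlib sketch plus a concurrent usage example:

package main

import (
    "fmt"
    "sync"
)

type versioner struct {
    mu       sync.Mutex
    versions map[string]int64
}

// increment returns the next strictly increasing version for the named object.
func (v *versioner) increment(name string) int64 {
    v.mu.Lock()
    defer v.mu.Unlock()
    v.versions[name]++
    return v.versions[name]
}

func main() {
    v := &versioner{versions: map[string]int64{}}

    // Concurrent Add/Update calls on the same work each get a distinct version.
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            v.increment("work-a")
        }()
    }
    wg.Wait()
    fmt.Println(v.increment("work-a")) // 6: strictly increasing per name
}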
var _ store.ClientWatcherStore[*workv1.ManifestWork] = &AgentInformerWatcherStore{}
@@ -95,13 +128,43 @@ var _ store.ClientWatcherStore[*workv1.ManifestWork] = &AgentInformerWatcherStor
func NewAgentInformerWatcherStore() *AgentInformerWatcherStore {
    return &AgentInformerWatcherStore{
        AgentInformerWatcherStore: store.AgentInformerWatcherStore[*workv1.ManifestWork]{
            BaseClientWatchStore: store.BaseClientWatchStore[*workv1.ManifestWork]{},
            Watcher: store.NewWatcher(),
            BaseClientWatchStore: store.BaseClientWatchStore[*workv1.ManifestWork]{
                Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
            },
            Watcher: store.NewWatcher(),
        },
        versions: newVersioner(),
    }
}

func (s *AgentInformerWatcherStore) HandleReceivedResource(action types.ResourceAction, work *workv1.ManifestWork) error {
func (s *AgentInformerWatcherStore) Add(resource runtime.Object) error {
    accessor, err := meta.Accessor(resource)
    if err != nil {
        return err
    }
    accessor.SetResourceVersion(strconv.FormatInt(s.versions.increment(accessor.GetName()), 10))
    return s.AgentInformerWatcherStore.Add(resource)
}

func (s *AgentInformerWatcherStore) Update(resource runtime.Object) error {
    accessor, err := meta.Accessor(resource)
    if err != nil {
        return err
    }
    accessor.SetResourceVersion(strconv.FormatInt(s.versions.increment(accessor.GetName()), 10))
    return s.AgentInformerWatcherStore.Update(resource)
}

func (s *AgentInformerWatcherStore) Delete(resource runtime.Object) error {
    accessor, err := meta.Accessor(resource)
    if err != nil {
        return err
    }
    s.versions.delete(accessor.GetName())
    return s.AgentInformerWatcherStore.Delete(resource)
}

func (s *AgentInformerWatcherStore) HandleReceivedResource(ctx context.Context, action types.ResourceAction, work *workv1.ManifestWork) error {
    switch action {
    case types.Added:
        return s.Add(work.DeepCopy())
@@ -113,20 +176,17 @@ func (s *AgentInformerWatcherStore) HandleReceivedResource(action types.Resource
        if !exists {
            return fmt.Errorf("the work %s/%s does not exist", work.Namespace, work.Name)
        }
        // prevent the work from being updated if it is deleting
        if !lastWork.GetDeletionTimestamp().IsZero() {
            klog.Warningf("the work %s/%s is deleting, ignore the update", work.Namespace, work.Name)
            return nil
        }

        updatedWork := work.DeepCopy()

        // restore the fields that are maintained by local agent
        updatedWork.Labels = lastWork.Labels
        updatedWork.Annotations = lastWork.Annotations
        // prevent the work from being updated if it is deleting
        if !lastWork.GetDeletionTimestamp().IsZero() {
            updatedWork.SetDeletionTimestamp(lastWork.DeletionTimestamp)
        }

        // restore the fields that are maintained by local agent.
        updatedWork.Finalizers = lastWork.Finalizers
        updatedWork.Status = lastWork.Status

        return s.Update(updatedWork)
    case types.Deleted:
        // the manifestwork is deleting on the source, we just update its deletion timestamp.
@@ -138,10 +198,12 @@ func (s *AgentInformerWatcherStore) HandleReceivedResource(action types.Resource
            return nil
        }

        // update the deletionTimeStamp and generation of last work.
        // generation needs to be updated because it is possible that generation still change after
        // the object is in deleting state.
        updatedWork := lastWork.DeepCopy()
        updatedWork.Generation = work.Generation
        updatedWork.ResourceVersion = work.ResourceVersion
        updatedWork.DeletionTimestamp = work.DeletionTimestamp
        updatedWork.Generation = work.Generation
        return s.Update(updatedWork)
    default:
        return fmt.Errorf("unsupported resource action %s", action)

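The Add/Update overrides above stamp the locally fabricated version through the generic metav1.Object accessor, so any runtime.Object works. A minimal sketch of that pattern with apimachinery (the ConfigMap is just a convenient concrete object):

package main

import (
    "fmt"
    "strconv"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

// stampVersion writes a fabricated resource version onto any Kubernetes object.
func stampVersion(obj runtime.Object, version int64) error {
    accessor, err := meta.Accessor(obj) // metav1.Object view of the object
    if err != nil {
        return err
    }
    accessor.SetResourceVersion(strconv.FormatInt(version, 10))
    return nil
}

func main() {
    cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
    if err := stampVersion(cm, 7); err != nil {
        panic(err)
    }
    fmt.Println(cm.ResourceVersion) // "7"
}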
22 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/clients/work/store/local.go generated vendored
@@ -2,6 +2,7 @@ package store

import (
    "context"
    "errors"
    "fmt"
    "time"

@@ -88,10 +89,10 @@ func NewSourceLocalWatcherStore(ctx context.Context, listFunc ListLocalWorksFunc
    }

    // start a goroutine to process the received work events from the work queue with current store.
    go newWorkProcessor(s.receivedWorks, s).run(ctx.Done())
    go newWorkProcessor(s.receivedWorks, s).run(ctx)

    // start a goroutine to handle the events that are produced by work client
    go wait.Until(s.processLoop, time.Second, ctx.Done())
    go wait.UntilWithContext(ctx, s.processLoop, time.Second)

    return s, nil
}
@@ -161,7 +162,8 @@ func (s *SourceLocalWatcherStore) GetWatcher(namespace string, opts metav1.ListO
}

// processLoop drains the work event queue and send the event to the watch channel.
func (s *SourceLocalWatcherStore) processLoop() {
func (s *SourceLocalWatcherStore) processLoop(ctx context.Context) {
    logger := klog.FromContext(ctx)
    for {
        // this will be blocked until the event queue has events
        obj, err := s.eventQueue.Pop(func(interface{}, bool) error {
@@ -173,23 +175,23 @@ func (s *SourceLocalWatcherStore) processLoop() {
                return
            }

            klog.Warningf("failed to pop the %v requeue it, %v", obj, err)
            logger.Error(err, "failed to pop the object, requeue it", "object", obj)
            // this is the safe way to re-enqueue.
            if err := s.eventQueue.Add(obj); err != nil {
                klog.Errorf("failed to requeue the obj %v, %v", obj, err)
                logger.Error(err, "failed to requeue the obj", "object", obj)
                return
            }
        }

        evt, ok := obj.(*watchEvent)
        if !ok {
            klog.Errorf("unknown the object type %T from the event queue", obj)
            logger.Error(errors.New("unknown the object type from the event queue"), "", "object", obj)
            return
        }

        obj, exists, err := s.Store.GetByKey(evt.Key)
        if err != nil {
            klog.Errorf("failed to get the work %s, %v", evt.Key, err)
            logger.Error(err, "failed to get the work", "key", evt.Key)
            return
        }

@@ -197,7 +199,7 @@ func (s *SourceLocalWatcherStore) processLoop() {
            if evt.Type == watch.Deleted {
                namespace, name, err := cache.SplitMetaNamespaceKey(evt.Key)
                if err != nil {
                    klog.Errorf("unexpected event key %s, %v", evt.Key, err)
                    logger.Error(err, "unexpected event key", "key", evt.Key)
                    return
                }

@@ -215,13 +217,13 @@ func (s *SourceLocalWatcherStore) processLoop() {
                return
            }

            klog.Errorf("the work %s does not exist in the cache", evt.Key)
            logger.Error(errors.New("the work does not exist in the cache"), "", "key", evt.Key)
            return
        }

        work, ok := obj.(*workv1.ManifestWork)
        if !ok {
            klog.Errorf("unknown the object type %T from the cache", obj)
            logger.Error(fmt.Errorf("unknown object type %T from the cache", obj), "")
            return
        }


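processLoop pops events from a keyed FIFO and re-enqueues them with Add when a pop fails, rather than dropping them. A sketch of that pop/requeue shape using client-go's cache.FIFO, assuming the two-argument PopProcessFunc signature of the client-go version vendored here (the event type and key function are illustrative):

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

type event struct{ Key string }

func main() {
    // Keyed FIFO like the store's eventQueue.
    queue := cache.NewFIFO(func(obj interface{}) (string, error) {
        return obj.(*event).Key, nil
    })
    _ = queue.Add(&event{Key: "cluster1/work-a"})

    // Pop blocks until an item is available; an error from the process function
    // is surfaced to the caller, mirroring the loop above.
    obj, err := queue.Pop(func(obj interface{}, _ bool) error { return nil })
    if err != nil {
        // Re-enqueueing with Add is the safe way to retry a failed pop.
        _ = queue.Add(obj)
        return
    }
    fmt.Println("popped", obj.(*event).Key)
}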
9 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/constants/constants.go generated vendored
@@ -1,7 +1,10 @@
package constants

const (
    ConfigTypeMQTT = "mqtt"
    ConfigTypeGRPC = "grpc"
    ConfigTypeKafka = "kafka"
    ConfigTypeMQTT = "mqtt"
    ConfigTypeGRPC = "grpc"
)

// GRPCSubscriptionIDKey is the key for the gRPC subscription ID.
// This ID is generated by the gRPC server after the client subscribes to it.
const GRPCSubscriptionIDKey = "subscription-id"

@@ -1,9 +1,8 @@
package generic
package clients

import (
    "context"
    "fmt"
    "strconv"
    "time"

    cloudevents "github.com/cloudevents/sdk-go/v2"
@@ -11,20 +10,23 @@ import (

    "k8s.io/klog/v2"

    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/utils"
)

// CloudEventAgentClient is a client for an agent to resync/send/receive its resources with cloud events.
//
// An agent is a component that handles the deployment of requested resources on the managed cluster and status report
// to the source.
type CloudEventAgentClient[T ResourceObject] struct {
type CloudEventAgentClient[T generic.ResourceObject] struct {
    *baseClient
    lister Lister[T]
    codec Codec[T]
    statusHashGetter StatusHashGetter[T]
    lister generic.Lister[T]
    codec generic.Codec[T]
    statusHashGetter generic.StatusHashGetter[T]
    agentID string
    clusterName string
}
@@ -36,19 +38,18 @@ type CloudEventAgentClient[T ResourceObject] struct {
// - lister gets the resources from a cache/store of an agent.
// - statusHashGetter calculates the resource status hash.
// - codec is used to encode/decode a resource objet/cloudevent to/from a cloudevent/resource objet.
func NewCloudEventAgentClient[T ResourceObject](
func NewCloudEventAgentClient[T generic.ResourceObject](
    ctx context.Context,
    agentOptions *options.CloudEventsAgentOptions,
    lister Lister[T],
    statusHashGetter StatusHashGetter[T],
    codec Codec[T],
) (*CloudEventAgentClient[T], error) {
    lister generic.Lister[T],
    statusHashGetter generic.StatusHashGetter[T],
    codec generic.Codec[T],
) (generic.CloudEventsClient[T], error) {
    baseClient := &baseClient{
        clientID: agentOptions.AgentID,
        cloudEventsOptions: agentOptions.CloudEventsOptions,
        cloudEventsRateLimiter: NewRateLimiter(agentOptions.EventRateLimit),
        transport: agentOptions.CloudEventsTransport,
        cloudEventsRateLimiter: utils.NewRateLimiter(agentOptions.EventRateLimit),
        reconnectedChan: make(chan struct{}),
        dataType: codec.EventDataType(),
    }

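The constructor now returns the generic.CloudEventsClient[T] interface instead of the concrete client, so sources and agents are interchangeable behind one contract. A stdlib sketch of that shape (the interface and method set here are illustrative, not the sdk-go definitions):

package main

import (
    "context"
    "fmt"
)

// resourceObject is an illustrative constraint standing in for generic.ResourceObject.
type resourceObject interface {
    GetUID() string
}

// client is an illustrative stand-in for generic.CloudEventsClient[T].
type client[T resourceObject] interface {
    Publish(ctx context.Context, obj T) error
}

type agentClient[T resourceObject] struct{}

func (a *agentClient[T]) Publish(ctx context.Context, obj T) error {
    fmt.Println("publishing", obj.GetUID())
    return nil
}

// newAgentClient returns the interface, hiding the concrete implementation.
func newAgentClient[T resourceObject]() client[T] {
    return &agentClient[T]{}
}

type work struct{ uid string }

func (w work) GetUID() string { return w.uid }

func main() {
    c := newAgentClient[work]()
    _ = c.Publish(context.Background(), work{uid: "123"})
}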
    if err := baseClient.connect(ctx); err != nil {
@@ -82,14 +83,10 @@ func (c *CloudEventAgentClient[T]) Resync(ctx context.Context, source string) er

    resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))}
    for i, obj := range objs {
        resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64)
        if err != nil {
            return err
        }

        resources.Versions[i] = payload.ResourceVersion{
            ResourceID: string(obj.GetUID()),
            ResourceVersion: resourceVersion,
            ResourceID: string(obj.GetUID()),
            // this should be set as generation, since the resource version of the object is local version.
            ResourceVersion: obj.GetGeneration(),
        }
    }

@@ -111,7 +108,7 @@ func (c *CloudEventAgentClient[T]) Resync(ctx context.Context, source string) er
        return err
    }

    increaseCloudEventsSentFromAgentCounter(evt.Source(), source, c.codec.EventDataType().String(), string(eventType.SubResource), string(eventType.Action))
    metrics.IncreaseCloudEventsSentFromAgentCounter(evt.Source(), source, c.codec.EventDataType().String(), string(eventType.SubResource), string(eventType.Action))

    return nil
}
@@ -132,7 +129,7 @@ func (c *CloudEventAgentClient[T]) Publish(ctx context.Context, eventType types.
    }

    originalSource, _ := cloudeventstypes.ToString(evt.Context.GetExtensions()[types.ExtensionOriginalSource])
    increaseCloudEventsSentFromAgentCounter(evt.Source(), originalSource, eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))
    metrics.IncreaseCloudEventsSentFromAgentCounter(evt.Source(), originalSource, eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))

    return nil
}
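Resync now reports each object's generation as its version, since the local resourceVersion is fabricated by the agent cache. A sketch of building that version list (the struct names mirror the sdk-go payload package but are declared locally here):

package main

import "fmt"

// Local stand-ins for payload.ResourceVersion / payload.ResourceVersionList.
type resourceVersion struct {
    ResourceID      string
    ResourceVersion int64
}

type resourceVersionList struct {
    Versions []resourceVersion
}

type obj struct {
    UID        string
    Generation int64
}

func buildResyncPayload(objs []obj) resourceVersionList {
    list := resourceVersionList{Versions: make([]resourceVersion, len(objs))}
    for i, o := range objs {
        list.Versions[i] = resourceVersion{
            ResourceID: o.UID,
            // The generation, not the cache-local resource version, is what the
            // source can compare against during a resync.
            ResourceVersion: o.Generation,
        }
    }
    return list
}

func main() {
    fmt.Printf("%+v\n", buildResyncPayload([]obj{{UID: "a", Generation: 2}}))
}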
@@ -140,66 +137,70 @@ func (c *CloudEventAgentClient[T]) Publish(ctx context.Context, eventType types.
// Subscribe the events that are from the source status resync request or source resource spec request.
// For status resync request, agent publish the current resources status back as response.
// For resource spec request, agent receives resource spec and handles the spec with resource handlers.
func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) {
func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...generic.ResourceHandler[T]) {
    c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) {
        c.receive(ctx, evt, handlers...)
    })
}

func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) {
func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...generic.ResourceHandler[T]) {
    logger := klog.FromContext(ctx)

    eventType, err := types.ParseCloudEventsType(evt.Type())
    if err != nil {
        klog.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err)
        logger.Error(err, "failed to parse cloud event type", "eventType", evt.Type())
        return
    }
    logger = logger.WithValues("eventType", evt.Type())

    increaseCloudEventsReceivedByAgentCounter(evt.Source(), eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))
    metrics.IncreaseCloudEventsReceivedByAgentCounter(evt.Source(), eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))

    if eventType.Action == types.ResyncRequestAction {
        if eventType.SubResource != types.SubResourceStatus {
            klog.Warningf("unsupported resync event type %s, ignore", eventType)
            logger.Info("ignore unsupported resync event type")
            return
        }

        startTime := time.Now()
        if err := c.respondResyncStatusRequest(ctx, eventType.CloudEventsDataType, evt); err != nil {
            klog.Errorf("failed to resync manifestsstatus, %v", err)
            logger.Error(err, "failed to resync manifestsstatus.")
        }
        updateResourceStatusResyncDurationMetric(evt.Source(), c.clusterName, eventType.CloudEventsDataType.String(), startTime)
        metrics.UpdateResourceStatusResyncDurationMetric(evt.Source(), c.clusterName, eventType.CloudEventsDataType.String(), startTime)

        return
    }

    if eventType.SubResource != types.SubResourceSpec {
        klog.Warningf("unsupported event type %s, ignore", eventType)
        logger.Info("ignore unsupported event type")
        return
    }

    evtExtensions := evt.Context.GetExtensions()
    clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName])
    if err != nil {
        klog.Errorf("failed to get clustername extension: %v", err)
        logger.Error(err, "failed to get clustername extension")
        return
    }
    if clusterName != c.clusterName {
        klog.V(4).Infof("event clustername %s and agent clustername %s do not match, ignore", clusterName, c.clusterName)
        logger.V(4).Info("event clustername and agent clustername do not match, ignore",
            "eventClusterName", clusterName, "agentClusterName", c.clusterName)
        return
    }

    if eventType.CloudEventsDataType != c.codec.EventDataType() {
        klog.Warningf("unsupported event data type %s, ignore", eventType.CloudEventsDataType)
        logger.Info("unsupported event data type, ignore", "eventDataType", eventType.CloudEventsDataType)
        return
    }

    obj, err := c.codec.Decode(&evt)
    if err != nil {
        klog.Errorf("failed to decode spec, %v", err)
        logger.Error(err, "failed to decode spec")
        return
    }

    action, err := c.specAction(evt.Source(), eventType.CloudEventsDataType, obj)
    if err != nil {
        klog.Errorf("failed to generate spec action %s, %v", evt, err)
        logger.Error(err, "failed to generate spec action", "event", evt)
        return
    }

@@ -209,8 +210,8 @@ func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.
    }

    for _, handler := range handlers {
        if err := handler(action, obj); err != nil {
            klog.Errorf("failed to handle spec event %s, %v", evt, err)
        if err := handler(ctx, action, obj); err != nil {
            logger.Error(err, "failed to handle spec event", "event", evt)
        }
    }
}
@@ -224,6 +225,8 @@ func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.
func (c *CloudEventAgentClient[T]) respondResyncStatusRequest(
    ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event,
) error {
    logger := klog.FromContext(ctx).WithValues("eventDataType", eventDataType.String())

    options := types.ListOptions{ClusterName: c.clusterName, Source: evt.Source(), CloudEventsDataType: eventDataType}
    objs, err := c.lister.List(options)
    if err != nil {
@@ -256,7 +259,7 @@ func (c *CloudEventAgentClient[T]) respondResyncStatusRequest(
        lastHash, ok := findStatusHash(string(obj.GetUID()), statusHashes.Hashes)
        if !ok {
            // ignore the resource that is not on the source, but exists on the agent, wait for the source deleting it
            klog.Infof("The resource %s is not found from the source, ignore", obj.GetUID())
            logger.Info("The resource is not found from the source, ignore", "uid", obj.GetUID())
            continue
        }

@@ -295,30 +298,20 @@ func (c *CloudEventAgentClient[T]) specAction(
        return types.Deleted, nil
    }

    // if both the current and the last object have the resource version "0", then object
    // if both the current and the last object have the generation "0" or empty, then object
    // is considered as modified, the message broker guarantees the order of the messages
    if obj.GetResourceVersion() == "0" && lastObj.GetResourceVersion() == "0" {
    if lastObj.GetGeneration() == 0 && obj.GetGeneration() == 0 {
        return types.Modified, nil
    }

    resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64)
    if err != nil {
        return evt, err
    }

    lastResourceVersion, err := strconv.ParseInt(lastObj.GetResourceVersion(), 10, 64)
    if err != nil {
        return evt, err
    }

    if resourceVersion <= lastResourceVersion {
    if obj.GetGeneration() < lastObj.GetGeneration() {
        return evt, nil
    }

    return types.Modified, nil
}

func getObj[T ResourceObject](resourceID string, objs []T) (obj T, exists bool) {
func getObj[T generic.ResourceObject](resourceID string, objs []T) (obj T, exists bool) {
    for _, obj := range objs {
        if string(obj.GetUID()) == resourceID {
            return obj, true
@@ -1,4 +1,4 @@
package generic
package clients

import (
    "context"
@@ -14,8 +14,9 @@ import (
    "k8s.io/klog/v2"
    "k8s.io/utils/clock"

    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
    "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/utils"
)

const (
@@ -32,29 +33,41 @@ var DelayFn = wait.Backoff{
    Jitter: 1.0,
}.DelayWithReset(&clock.RealClock{}, 10*time.Minute)

// receiveFn is an internal callback function for processing received CloudEvents with context.
type receiveFn func(ctx context.Context, evt cloudevents.Event)

// baseClient provides the core functionality for CloudEvents source and agent clients.
//
// It handles three primary responsibilities:
// 1. Automatic reconnection when the transport connection fails
// 2. Event subscription management with receiver restart capability
// 3. Rate-limited event publishing
//
// The client maintains connection state and automatically attempts to reconnect when
// errors are detected from the transport layer. Upon successful reconnection, it restarts
// the event receiver and notifies listeners via the reconnectedChan.
//
// Thread-safety: All public methods are safe for concurrent use. The clientReady flag
// and receiverChan are protected by an embedded RWMutex.
type baseClient struct {
    sync.RWMutex
    clientID string // the client id is used to identify the client, either a source or an agent ID
    cloudEventsOptions options.CloudEventsOptions
    cloudEventsProtocol options.CloudEventsProtocol
    cloudEventsClient cloudevents.Client
    transport options.CloudEventTransport
    cloudEventsRateLimiter flowcontrol.RateLimiter
    receiverChan chan int
    reconnectedChan chan struct{}
    clientReady bool
    dataType types.CloudEventsDataType
}

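The struct above swaps the separate protocol and client fields for a single transport abstraction guarded by the embedded RWMutex. The reconnect loop it drives follows a standard backoff shape; a stdlib-only sketch of that loop (the transport interface and delays are illustrative):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// transport is an illustrative stand-in for the options.CloudEventTransport interface.
type transport interface {
    Connect(ctx context.Context) error
}

type flaky struct{ attempts int }

func (f *flaky) Connect(ctx context.Context) error {
    f.attempts++
    if f.attempts < 3 {
        return errors.New("connection refused")
    }
    return nil
}

// reconnect retries Connect with a growing delay until it succeeds or ctx ends.
func reconnect(ctx context.Context, t transport) error {
    delay := 10 * time.Millisecond
    for {
        if err := t.Connect(ctx); err == nil {
            return nil
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(delay):
            delay *= 2 // exponential backoff; the real client also resets it after 10m
        }
    }
}

func main() {
    if err := reconnect(context.Background(), &flaky{}); err != nil {
        panic(err)
    }
    fmt.Println("connected")
}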
func (c *baseClient) connect(ctx context.Context) error {
    logger := klog.FromContext(ctx)

    var err error
    c.cloudEventsClient, err = c.newCloudEventsClient(ctx)
    err = c.transport.Connect(ctx)
    if err != nil {
        return err
    }
    c.setClientReady(true)

    // start a go routine to handle cloudevents client connection errors
    go func() {
@@ -62,43 +75,39 @@ func (c *baseClient) connect(ctx context.Context) error {
            if !c.isClientReady() {
                logger.V(2).Info("reconnecting the cloudevents client")

                c.cloudEventsClient, err = c.newCloudEventsClient(ctx)
                err = c.transport.Connect(ctx)
                // TODO enhance the cloudevents SKD to avoid wrapping the error type to distinguish the net connection
                // errors
                if err != nil {
                    // failed to reconnect, try agin
                    runtime.HandleError(fmt.Errorf("the cloudevents client reconnect failed, %v", err))
                    // failed to reconnect, try again
                    runtime.HandleErrorWithContext(ctx, err, "the cloudevents client reconnect failed")
                    <-wait.RealTimer(DelayFn()).C()
                    continue
                }
                // the cloudevents network connection is back, mark the client ready and send the receiver restart signal
                logger.V(2).Info("the cloudevents client is reconnected")
                increaseClientReconnectedCounter(c.clientID)
                metrics.IncreaseClientReconnectedCounter(c.clientID)
                c.setClientReady(true)
                c.sendReceiverSignal(restartReceiverSignal)
                c.sendReconnectedSignal()
            }

            select {
            case <-ctx.Done():
                if c.receiverChan != nil {
                    close(c.receiverChan)
                }
                c.closeChannels()
                return
            case err, ok := <-c.cloudEventsOptions.ErrorChan():
            case err, ok := <-c.transport.ErrorChan():
                if !ok {
                    // error channel is closed, do nothing
                    return
                }

                runtime.HandleError(fmt.Errorf("the cloudevents client is disconnected, %v", err))
                runtime.HandleErrorWithContext(ctx, err, "the cloudevents client is disconnected")

                // the cloudevents client network connection is closed, send the receiver stop signal, set the current client not ready
                // and close the current client
                c.sendReceiverSignal(stopReceiverSignal)
                c.setClientReady(false)
                if err := c.cloudEventsProtocol.Close(ctx); err != nil {
                    runtime.HandleError(fmt.Errorf("failed to close the cloudevents protocol, %v", err))
                if err := c.transport.Close(ctx); err != nil {
                    runtime.HandleErrorWithContext(ctx, err, "failed to close the cloudevents protocol")
                }

                <-wait.RealTimer(DelayFn()).C()
@@ -118,7 +127,7 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error {
    }

    latency := time.Since(now)
    if latency > longThrottleLatency {
    if latency > utils.LongThrottleLatency {
        logger.V(3).Info(
            "Client-side throttling delay (not priority and fairness)",
            "latency", latency,
@@ -126,18 +135,15 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error {
        )
    }

    sendingCtx, err := c.cloudEventsOptions.WithContext(ctx, evt.Context)
    if err != nil {
        return err
    }

    if !c.isClientReady() {
        return fmt.Errorf("the cloudevents client is not ready")
    }

    logger.V(2).Info("Sending event", "context", sendingCtx, "event", evt.Context)
    logger.V(5).Info("Sending event", "event", func() any { return evt.String() })
    if err := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(err) {
    logger.V(2).Info("Sending event", "event", evt.Context)
    if logger.V(5).Enabled() {
        logger.V(5).Info("Sending event", "event", evt.String())
    }
    if err := c.transport.Send(ctx, evt); err != nil {
        return err
    }

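publish measures how long the rate limiter held the event and only logs when the wait exceeds a threshold, mirroring client-go's throttling diagnostics. A runnable sketch of that measurement using client-go's flowcontrol limiter (the threshold value is illustrative):

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

const longThrottleLatency = 50 * time.Millisecond // illustrative threshold

func main() {
    // 1 event/second with burst 1, so the second Accept() blocks about a second.
    limiter := flowcontrol.NewTokenBucketRateLimiter(1, 1)

    for i := 0; i < 2; i++ {
        now := time.Now()
        limiter.Accept() // blocks until the limiter admits the event

        // Only surface waits long enough to matter, like the publish path above.
        if latency := time.Since(now); latency > longThrottleLatency {
            fmt.Printf("client-side throttling delay: %v\n", latency)
        }
    }
}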
@@ -155,21 +161,52 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) {
        return
    }

    // send subscription request before starting to receive events
    if err := c.transport.Subscribe(ctx); err != nil {
        runtime.HandleErrorWithContext(ctx, err, "failed to subscribe")
        return
    }

    c.receiverChan = make(chan int)

    // start a go routine to handle cloudevents subscription
    go func() {
        receiverCtx, receiverCancel := context.WithCancel(context.TODO())
        receiverCtx, receiverCancel := context.WithCancel(ctx)
        startReceiving := true
        subscribed := true

        for {
            if !subscribed {
                // resubscribe before restarting the receiver
                if err := c.transport.Subscribe(ctx); err != nil {
                    if ctx.Err() != nil {
                        receiverCancel()
                        return
                    }

                    runtime.HandleError(fmt.Errorf("failed to resubscribe, %v", err))
                    select {
                    case <-ctx.Done():
                        receiverCancel()
                        return
                    case <-wait.RealTimer(DelayFn()).C():
                    }
                    continue
                }
                subscribed = true
                // notify the client caller to resync the resources
                c.sendReconnectedSignal(ctx)
            }

            if startReceiving {
                go func() {
                    if err := c.cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) {
                    if err := c.transport.Receive(receiverCtx, func(ctx context.Context, evt cloudevents.Event) {
                        logger := klog.FromContext(ctx)
                        logger.V(2).Info("Received event", "event", evt.Context)
                        logger.V(5).Info("Received event", "event", func() any { return evt.String() })

                        receive(receiverCtx, evt)
                        if logger.V(5).Enabled() {
                            logger.V(5).Info("Received event", "event", evt.String())
                        }
                        receive(ctx, evt)
                    }); err != nil {
                        runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err))
                    }
@@ -181,7 +218,7 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) {
            case <-ctx.Done():
                receiverCancel()
                return
            case signal, ok := <-c.receiverChan:
            case signal, ok := <-c.getReceiverChan():
                if !ok {
                    // receiver channel is closed, stop the receiver
                    receiverCancel()
@@ -192,13 +229,14 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) {
                case restartReceiverSignal:
                    logger.V(2).Info("restart the cloudevents receiver")
                    // rebuild the receiver context and restart receiving
                    receiverCtx, receiverCancel = context.WithCancel(context.TODO())
                    receiverCtx, receiverCancel = context.WithCancel(ctx)
                    startReceiving = true
                    subscribed = false
                case stopReceiverSignal:
                    logger.V(2).Info("stop the cloudevents receiver")
                    receiverCancel()
                default:
                    runtime.HandleError(fmt.Errorf("unknown receiver signal %d", signal))
                    runtime.HandleErrorWithContext(ctx, fmt.Errorf("unknown receiver signal"), "", "signal", signal)
                }
            }
        }
@@ -214,10 +252,32 @@ func (c *baseClient) sendReceiverSignal(signal int) {
    }
}

func (c *baseClient) sendReconnectedSignal() {
func (c *baseClient) closeChannels() {
    c.Lock()
    defer c.Unlock()

    if c.receiverChan != nil {
        close(c.receiverChan)
        c.receiverChan = nil
    }
    if c.reconnectedChan != nil {
        close(c.reconnectedChan)
        c.reconnectedChan = nil
    }
}

func (c *baseClient) sendReconnectedSignal(ctx context.Context) {
    c.RLock()
    defer c.RUnlock()
    c.reconnectedChan <- struct{}{}
    if c.reconnectedChan != nil {
        select {
        case c.reconnectedChan <- struct{}{}:
            // Signal sent successfully
        default:
            // No receiver listening on reconnectedChan, that's okay - don't block
            klog.FromContext(ctx).Info("reconnected signal not sent, no receiver listening")
        }
    }
}

func (c *baseClient) isClientReady() bool {
@@ -232,19 +292,8 @@ func (c *baseClient) setClientReady(ready bool) {
    c.clientReady = ready
}

func (c *baseClient) newCloudEventsClient(ctx context.Context) (cloudevents.Client, error) {
    var err error
    c.cloudEventsProtocol, err = c.cloudEventsOptions.Protocol(ctx, c.dataType)
    if err != nil {
        return nil, err
    }

    cloudEventsClient, err := cloudevents.NewClient(c.cloudEventsProtocol)
    if err != nil {
        return nil, err
    }

    c.setClientReady(true)

    return cloudEventsClient, nil
func (c *baseClient) getReceiverChan() chan int {
    c.RLock()
    defer c.RUnlock()
    return c.receiverChan
}

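sendReconnectedSignal no longer blocks when nobody is draining reconnectedChan; a select with a default arm drops the notification instead of deadlocking under the read lock. A stdlib sketch of that idiom:

package main

import "fmt"

// notify sends on ch only if a receiver is ready; otherwise it drops the signal.
func notify(ch chan struct{}) bool {
    select {
    case ch <- struct{}{}:
        return true // a listener consumed the signal
    default:
        return false // nobody listening; do not block the caller
    }
}

func main() {
    unbuffered := make(chan struct{})
    fmt.Println(notify(unbuffered)) // false: no receiver, signal dropped

    buffered := make(chan struct{}, 1)
    fmt.Println(notify(buffered)) // true: buffered send succeeds immediately
}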
@@ -1,4 +1,4 @@
-package generic
+package clients

 import (
 	"context"
@@ -12,20 +12,23 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"

+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics"
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload"
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/utils"
 )

 // CloudEventSourceClient is a client for a source to resync/send/receive its resources with cloud events.
 //
 // A source is a component that runs on a server, it can be a controller on the hub cluster or a RESTful service
 // handling resource requests.
-type CloudEventSourceClient[T ResourceObject] struct {
+type CloudEventSourceClient[T generic.ResourceObject] struct {
 	*baseClient
-	lister           Lister[T]
-	codec            Codec[T]
-	statusHashGetter StatusHashGetter[T]
+	lister           generic.Lister[T]
+	codec            generic.Codec[T]
+	statusHashGetter generic.StatusHashGetter[T]
 	sourceID         string
 }

@@ -36,19 +39,18 @@ type CloudEventSourceClient[T ResourceObject] struct {
 // - lister gets the resources from a cache/store of a source.
 // - statusHashGetter calculates the resource status hash.
 // - codec is used to encode/decode a resource object/cloudevent to/from a cloudevent/resource object.
-func NewCloudEventSourceClient[T ResourceObject](
+func NewCloudEventSourceClient[T generic.ResourceObject](
 	ctx context.Context,
 	sourceOptions *options.CloudEventsSourceOptions,
-	lister Lister[T],
-	statusHashGetter StatusHashGetter[T],
-	codec Codec[T],
+	lister generic.Lister[T],
+	statusHashGetter generic.StatusHashGetter[T],
+	codec generic.Codec[T],
 ) (*CloudEventSourceClient[T], error) {
 	baseClient := &baseClient{
 		clientID:               sourceOptions.SourceID,
-		cloudEventsOptions:     sourceOptions.CloudEventsOptions,
-		cloudEventsRateLimiter: NewRateLimiter(sourceOptions.EventRateLimit),
+		transport:              sourceOptions.CloudEventsTransport,
+		cloudEventsRateLimiter: utils.NewRateLimiter(sourceOptions.EventRateLimit),
 		reconnectedChan:        make(chan struct{}),
 		dataType:               codec.EventDataType(),
 	}

 	if err := baseClient.connect(ctx); err != nil {
@@ -105,7 +107,7 @@ func (c *CloudEventSourceClient[T]) Resync(ctx context.Context, clusterName stri
 		return err
 	}

-	increaseCloudEventsSentFromSourceCounter(evt.Source(), clusterName, c.codec.EventDataType().String(), string(eventType.SubResource), string(eventType.Action))
+	metrics.IncreaseCloudEventsSentFromSourceCounter(evt.Source(), clusterName, c.codec.EventDataType().String(), string(eventType.SubResource), string(eventType.Action))

 	return nil
 }
@@ -130,7 +132,7 @@ func (c *CloudEventSourceClient[T]) Publish(ctx context.Context, eventType types
 	}

 	clusterName := evt.Context.GetExtensions()[types.ExtensionClusterName].(string)
-	increaseCloudEventsSentFromSourceCounter(evt.Source(), clusterName, eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))
+	metrics.IncreaseCloudEventsSentFromSourceCounter(evt.Source(), clusterName, eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))

 	return nil
 }
@@ -138,18 +140,21 @@ func (c *CloudEventSourceClient[T]) Publish(ctx context.Context, eventType types
 // Subscribe to the events that are from the agent spec resync request or agent resource status request.
 // For a spec resync request, the source publishes the current resources spec back as the response.
 // For a resource status request, the source receives the resource status and handles it with the resource handlers.
-func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) {
+func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...generic.ResourceHandler[T]) {
 	c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) {
 		c.receive(ctx, evt, handlers...)
 	})
 }

-func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) {
+func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...generic.ResourceHandler[T]) {
+	logger := klog.FromContext(ctx)
+
 	eventType, err := types.ParseCloudEventsType(evt.Type())
 	if err != nil {
-		klog.Errorf("failed to parse cloud event type, %v", err)
+		logger.Error(err, "failed to parse cloud event type")
 		return
 	}
+	logger = logger.WithValues("eventType", evt.Type())

 	// clusterName is not required for agent to send the request, in case of missing clusterName, set it to
 	// empty string, as the source is sufficient to infer the event's originating cluster.
@@ -158,48 +163,48 @@ func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents
 		cn = ""
 	}

-	increaseCloudEventsReceivedBySourceCounter(evt.Source(), cn, eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))
+	metrics.IncreaseCloudEventsReceivedBySourceCounter(evt.Source(), cn, eventType.CloudEventsDataType.String(), string(eventType.SubResource), string(eventType.Action))

 	if eventType.Action == types.ResyncRequestAction {
 		if eventType.SubResource != types.SubResourceSpec {
-			klog.Warningf("unsupported event type %s, ignore", eventType)
+			logger.Info("unsupported event type, ignore")
 			return
 		}

 		clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName)
 		if err != nil {
-			klog.Errorf("failed to get cluster name extension, %v", err)
+			logger.Error(err, "failed to get cluster name extension")
 			return
 		}

 		startTime := time.Now()
 		if err := c.respondResyncSpecRequest(ctx, eventType.CloudEventsDataType, evt); err != nil {
-			klog.Errorf("failed to resync resources spec, %v", err)
+			logger.Error(err, "failed to resync resources spec")
 		}
-		updateResourceSpecResyncDurationMetric(c.sourceID, fmt.Sprintf("%s", clusterName), eventType.CloudEventsDataType.String(), startTime)
+		metrics.UpdateResourceSpecResyncDurationMetric(c.sourceID, fmt.Sprintf("%s", clusterName), eventType.CloudEventsDataType.String(), startTime)

 		return
 	}

 	if eventType.CloudEventsDataType != c.codec.EventDataType() {
-		klog.Warningf("unsupported event data type %s, ignore", eventType.CloudEventsDataType)
+		logger.Info("unsupported event data type, ignore", "eventDataType", eventType.CloudEventsDataType)
 		return
 	}

 	if eventType.SubResource != types.SubResourceStatus {
-		klog.Warningf("unsupported event type %s, ignore", eventType)
+		logger.Info("unsupported event type, ignore")
 		return
 	}

 	obj, err := c.codec.Decode(&evt)
 	if err != nil {
-		klog.Errorf("failed to decode status, %v", err)
+		logger.Error(err, "failed to decode status")
 		return
 	}

 	for _, handler := range handlers {
-		if err := handler(types.StatusModified, obj); err != nil {
-			klog.Errorf("failed to handle status event %s, %v", evt, err)
+		if err := handler(ctx, types.StatusModified, obj); err != nil {
+			logger.Error(err, "failed to handle status event", "event", evt)
 		}
 	}
 }
@@ -215,6 +220,8 @@ func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents
 func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
 	ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event,
 ) error {
+	logger := klog.FromContext(ctx)
+
 	resourceVersions, err := payload.DecodeSpecResyncRequest(evt)
 	if err != nil {
 		return err
@@ -244,7 +251,7 @@ func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
 	// TODO we cannot list objs now, the lister may be not ready, we may need to add HasSynced
 	// for the lister
 	if len(objs) == 0 {
-		klog.V(4).Infof("there are is no objs from the list, do nothing")
+		logger.V(4).Info("there are no objs from the list, do nothing")
 		return nil
 	}

@@ -260,7 +267,7 @@ func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
 		lastResourceVersion := findResourceVersion(string(obj.GetUID()), resourceVersions.Versions)
 		currentResourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64)
 		if err != nil {
-			klog.V(4).Infof("ignore the obj %v since it has a invalid resourceVersion, %v", obj, err)
+			logger.V(4).Info("ignore the obj since it has an invalid resourceVersion", "object", obj, "error", err)
 			continue
 		}

@@ -290,7 +297,7 @@ func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
 		if err := c.publish(ctx, evt); err != nil {
 			return err
 		}
-		increaseCloudEventsSentFromSourceCounter(evt.Source(), fmt.Sprintf("%s", clusterName), evtDataType.String(), string(eventType.SubResource), string(eventType.Action))
+		metrics.IncreaseCloudEventsSentFromSourceCounter(evt.Source(), fmt.Sprintf("%s", clusterName), evtDataType.String(), string(eventType.SubResource), string(eventType.Action))
 	}

 	return nil
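The receive loop above now hands its context to every handler (handler(ctx, types.StatusModified, obj)). A toy sketch of that dispatch pattern with the new context-aware handler shape; the types here are stand-ins mirroring the diff, not imports from sdk-go:

package main

import (
	"context"
	"fmt"
)

// Stand-ins mirroring the shapes in the hunk above; the real types live in
// sdk-go's generic and types packages.
type ResourceAction string

const StatusModified ResourceAction = "status_modified"

type ResourceHandler[T any] func(ctx context.Context, action ResourceAction, obj T) error

type resource struct{ UID string }

// dispatch mimics the tail of receive(): every handler now gets the caller's
// context, so handlers can bail out on shutdown instead of running blind.
func dispatch[T any](ctx context.Context, obj T, handlers ...ResourceHandler[T]) {
	for _, h := range handlers {
		if err := h(ctx, StatusModified, obj); err != nil {
			fmt.Println("handler error:", err)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	h := func(ctx context.Context, action ResourceAction, obj resource) error {
		if err := ctx.Err(); err != nil {
			return err // honor cancellation, the point of the new parameter
		}
		fmt.Printf("%s: %s\n", action, obj.UID)
		return nil
	}

	dispatch(ctx, resource{UID: "res-1"}, h)
	cancel()
	dispatch(ctx, resource{UID: "res-2"}, h) // handler now reports context.Canceled
}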
6
vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go
generated
vendored
@@ -12,7 +12,7 @@ import (
 )

 // ResourceHandler handles the received resource object.
-type ResourceHandler[T ResourceObject] func(action types.ResourceAction, obj T) error
+type ResourceHandler[T ResourceObject] func(ctx context.Context, action types.ResourceAction, obj T) error

 // StatusHashGetter gets the status hash of one resource object.
 type StatusHashGetter[T ResourceObject] func(obj T) (string, error)
@@ -25,8 +25,12 @@ type ResourceObject interface {
 	// GetResourceVersion returns the resource version of this object. The resource version is a required int64 sequence
 	// number property that must be incremented by the source whenever this resource changes.
 	// The source should guarantee its incremental nature.
+	// Deprecated: use GetGeneration() instead.
 	GetResourceVersion() string

+	// GetGeneration returns the generation number of this object to reflect the spec change of the resource.
+	GetGeneration() int64
+
 	// GetDeletionTimestamp returns the deletion timestamp of this object. The deletionTimestamp is an optional
 	// timestamp property representing that the resource is being deleted from the source; the agent needs to clean up
 	// the resource from its cluster.
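Implementors of ResourceObject now need GetGeneration alongside the deprecated GetResourceVersion. A sketch of one way to satisfy both during the transition; the trimmed-down interface copy and the myResource type are illustrative only:

package main

import (
	"fmt"
	"strconv"
)

// Illustrative subset of the ResourceObject contract from the hunk above.
type resourceObject interface {
	// Deprecated: use GetGeneration() instead.
	GetResourceVersion() string
	GetGeneration() int64
}

type myResource struct {
	generation int64
}

// GetGeneration is the preferred accessor: an int64 the source bumps on every
// spec change.
func (r *myResource) GetGeneration() int64 { return r.generation }

// GetResourceVersion keeps older callers working by deriving the deprecated
// string form from the generation.
func (r *myResource) GetResourceVersion() string {
	return strconv.FormatInt(r.generation, 10)
}

func main() {
	var obj resourceObject = &myResource{generation: 7}
	fmt.Println(obj.GetGeneration(), obj.GetResourceVersion()) // 7 "7"
}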
@@ -1,4 +1,4 @@
-package generic
+package metrics

 import (
 	"time"
@@ -26,7 +26,7 @@ const (
 	metricsWorkCodeLabel = "code"
 )

-const noneOriginalSource = "none"
+const NoneOriginalSource = "none"

 // cloudeventsReceivedBySourceMetricsLabels - Array of labels added to cloudevents received by source metrics:
 var cloudeventsReceivedBySourceMetricsLabels = []string{
@@ -97,7 +97,7 @@ const (
 // another for resource update would result in the following metrics:
 // cloudevents_received_total{source="agent1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifests",subresource="spec",action="create"} 1
 // cloudevents_received_total{source="agent1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifests",subresource="spec",action="update"} 1
-var cloudeventsReceivedBySourceCounterMetric = prometheus.NewCounterVec(
+var CloudeventsReceivedBySourceCounterMetric = prometheus.NewCounterVec(
 	prometheus.CounterOpts{
 		Subsystem: cloudeventsMetricsSubsystem,
 		Name:      receivedCounterMetric,
@@ -112,7 +112,7 @@ var cloudeventsReceivedBySourceCounterMetric = prometheus.NewCounterVec(
 // another for resource update would result in the following metrics:
 // cloudevents_received_total{source="source1",type="io.open-cluster-management.works.v1alpha1.manifests",subresource="spec",action="create"} 1
 // cloudevents_received_total{source="source1",type="io.open-cluster-management.works.v1alpha1.manifests",subresource="spec",action="update"} 1
-var cloudeventsReceivedByClientCounterMetric = prometheus.NewCounterVec(
+var CloudeventsReceivedByClientCounterMetric = prometheus.NewCounterVec(
 	prometheus.CounterOpts{
 		Subsystem: cloudeventsMetricsSubsystem,
 		Name:      receivedCounterMetric,
@@ -125,7 +125,7 @@ var cloudeventsReceivedByClientCounterMetric = prometheus.NewCounterVec(
 // and a help string of 'The total number of CloudEvents sent from source.'
 // For example, 1 cloudevent sent from source1 to consumer1 with data type manifestbundles for resource spec create would result in the following metrics:
 // cloudevents_sent_total{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",subresource="spec",action="create"} 1
-var cloudeventsSentFromSourceCounterMetric = prometheus.NewCounterVec(
+var CloudeventsSentFromSourceCounterMetric = prometheus.NewCounterVec(
 	prometheus.CounterOpts{
 		Subsystem: cloudeventsMetricsSubsystem,
 		Name:      sentCounterMetric,
@@ -138,7 +138,7 @@ var cloudeventsSentFromSourceCounterMetric = prometheus.NewCounterVec(
 // and a help string of 'The total number of CloudEvents sent from agent.'
 // For example, 2 CloudEvents sent from consumer1-work-agent back to source1 for resource status update would result in the following metrics:
 // cloudevents_sent_total{source="consumer1-work-agent",original_source="source1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",subresource="status",action="update"} 2
-var cloudeventsSentFromClientCounterMetric = prometheus.NewCounterVec(
+var CloudeventsSentFromClientCounterMetric = prometheus.NewCounterVec(
 	prometheus.CounterOpts{
 		Subsystem: cloudeventsMetricsSubsystem,
 		Name:      sentCounterMetric,
@@ -163,7 +163,7 @@ var cloudeventsSentFromClientCounterMetric = prometheus.NewCounterVec(
 // resource_spec_resync_duration_seconds_bucket{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifests",le="+Inf"} 2
 // resource_spec_resync_duration_seconds_sum{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifests"} 1.2
 // resource_spec_resync_duration_seconds_count{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifests"} 2
-var resourceSpecResyncDurationMetric = prometheus.NewHistogramVec(
+var ResourceSpecResyncDurationMetric = prometheus.NewHistogramVec(
 	prometheus.HistogramOpts{
 		Subsystem: resourcesMetricsSubsystem,
 		Name:      specResyncDurationMetric,
@@ -197,7 +197,7 @@ var resourceSpecResyncDurationMetric = prometheus.NewHistogramVec(
 // resource_status_resync_duration_seconds_bucket{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="+Inf"} 2
 // resource_status_resync_duration_seconds_sum{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifestbundles"} 1.6
 // resource_status_resync_duration_seconds_count{source="source1",consumer="consumer1",type="io.open-cluster-management.works.v1alpha1.manifestbundles"} 2
-var resourceStatusResyncDurationMetric = prometheus.NewHistogramVec(
+var ResourceStatusResyncDurationMetric = prometheus.NewHistogramVec(
 	prometheus.HistogramOpts{
 		Subsystem: resourcesMetricsSubsystem,
 		Name:      statusResyncDurationMetric,
@@ -219,7 +219,7 @@ var resourceStatusResyncDurationMetric = prometheus.NewHistogramVec(
 // and a help string of 'The total number of reconnects for the CloudEvents client.'
 // For example, 2 reconnects for the CloudEvents client with client_id=client1 would result in the following metrics:
 // client_reconnected_total{client_id="client1"} 2
-var clientReconnectedCounterMetric = prometheus.NewCounterVec(
+var ClientReconnectedCounterMetric = prometheus.NewCounterVec(
 	prometheus.CounterOpts{
 		Subsystem: cloudeventsMetricsSubsystem,
 		Name:      clientReconnectedCounter,
@@ -239,38 +239,38 @@ var workProcessedCounterMetric = prometheus.NewCounterVec(

 // Register the metrics
 func RegisterClientCloudEventsMetrics(register prometheus.Registerer) {
-	register.MustRegister(cloudeventsReceivedByClientCounterMetric)
-	register.MustRegister(cloudeventsSentFromClientCounterMetric)
-	register.MustRegister(resourceStatusResyncDurationMetric)
+	register.MustRegister(CloudeventsReceivedByClientCounterMetric)
+	register.MustRegister(CloudeventsSentFromClientCounterMetric)
+	register.MustRegister(ResourceStatusResyncDurationMetric)
 	register.MustRegister(workProcessedCounterMetric)
 }

 // Register the metrics
 func RegisterSourceCloudEventsMetrics(register prometheus.Registerer) {
-	register.MustRegister(cloudeventsReceivedBySourceCounterMetric)
-	register.MustRegister(cloudeventsSentFromSourceCounterMetric)
-	register.MustRegister(resourceSpecResyncDurationMetric)
-	register.MustRegister(clientReconnectedCounterMetric)
+	register.MustRegister(CloudeventsReceivedBySourceCounterMetric)
+	register.MustRegister(CloudeventsSentFromSourceCounterMetric)
+	register.MustRegister(ResourceSpecResyncDurationMetric)
+	register.MustRegister(ClientReconnectedCounterMetric)
 }

 // ResetSourceCloudEventsMetrics resets all collectors from source
 func ResetSourceCloudEventsMetrics() {
-	cloudeventsReceivedBySourceCounterMetric.Reset()
-	cloudeventsSentFromSourceCounterMetric.Reset()
-	resourceSpecResyncDurationMetric.Reset()
-	clientReconnectedCounterMetric.Reset()
+	CloudeventsReceivedBySourceCounterMetric.Reset()
+	CloudeventsSentFromSourceCounterMetric.Reset()
+	ResourceSpecResyncDurationMetric.Reset()
+	ClientReconnectedCounterMetric.Reset()
 }

 // ResetClientCloudEventsMetrics resets all collectors from client
 func ResetClientCloudEventsMetrics() {
-	cloudeventsReceivedByClientCounterMetric.Reset()
-	cloudeventsSentFromClientCounterMetric.Reset()
-	resourceStatusResyncDurationMetric.Reset()
+	CloudeventsReceivedByClientCounterMetric.Reset()
+	CloudeventsSentFromClientCounterMetric.Reset()
+	ResourceStatusResyncDurationMetric.Reset()
 	workProcessedCounterMetric.Reset()
 }

-// increaseCloudEventsReceivedBySourceCounter increases the cloudevents received by source counter metric:
-func increaseCloudEventsReceivedBySourceCounter(source, consumer, dataType, subresource, action string) {
+// IncreaseCloudEventsReceivedBySourceCounter increases the cloudevents received by source counter metric:
+func IncreaseCloudEventsReceivedBySourceCounter(source, consumer, dataType, subresource, action string) {
 	labels := prometheus.Labels{
 		metricsSourceLabel:      source,
 		metricsConsumerLabel:    consumer,
@@ -278,22 +278,22 @@ func increaseCloudEventsReceivedBySourceCounter(source, consumer, dataType, subr
 		metricsSubResourceLabel: subresource,
 		metricsActionLabel:      action,
 	}
-	cloudeventsReceivedBySourceCounterMetric.With(labels).Inc()
+	CloudeventsReceivedBySourceCounterMetric.With(labels).Inc()
 }

-// increaseCloudEventsReceivedByAgentCounter increases the cloudevents received by agent counter metric:
-func increaseCloudEventsReceivedByAgentCounter(source, dataType, subresource, action string) {
+// IncreaseCloudEventsReceivedByAgentCounter increases the cloudevents received by agent counter metric:
+func IncreaseCloudEventsReceivedByAgentCounter(source, dataType, subresource, action string) {
 	labels := prometheus.Labels{
 		metricsSourceLabel:      source,
 		metricsDataTypeLabel:    dataType,
 		metricsSubResourceLabel: subresource,
 		metricsActionLabel:      action,
 	}
-	cloudeventsReceivedByClientCounterMetric.With(labels).Inc()
+	CloudeventsReceivedByClientCounterMetric.With(labels).Inc()
 }

-// increaseCloudEventsSentFromSourceCounter increases the cloudevents sent from source counter metric:
-func increaseCloudEventsSentFromSourceCounter(source, consumer, dataType, subresource, action string) {
+// IncreaseCloudEventsSentFromSourceCounter increases the cloudevents sent from source counter metric:
+func IncreaseCloudEventsSentFromSourceCounter(source, consumer, dataType, subresource, action string) {
 	labels := prometheus.Labels{
 		metricsSourceLabel:      source,
 		metricsConsumerLabel:    consumer,
@@ -301,13 +301,13 @@ func increaseCloudEventsSentFromSourceCounter(source, consumer, dataType, subres
 		metricsSubResourceLabel: subresource,
 		metricsActionLabel:      action,
 	}
-	cloudeventsSentFromSourceCounterMetric.With(labels).Inc()
+	CloudeventsSentFromSourceCounterMetric.With(labels).Inc()
 }

-// increaseCloudEventsSentFromAgentCounter increases the cloudevents sent from agent counter metric:
-func increaseCloudEventsSentFromAgentCounter(source, originalSource, dataType, subresource, action string) {
+// IncreaseCloudEventsSentFromAgentCounter increases the cloudevents sent from agent counter metric:
+func IncreaseCloudEventsSentFromAgentCounter(source, originalSource, dataType, subresource, action string) {
 	if originalSource == "" {
-		originalSource = noneOriginalSource
+		originalSource = NoneOriginalSource
 	}
 	labels := prometheus.Labels{
 		metricsSourceLabel:      source,
@@ -316,37 +316,37 @@ func increaseCloudEventsSentFromAgentCounter(source, originalSource, dataType, s
 		metricsSubResourceLabel: subresource,
 		metricsActionLabel:      action,
 	}
-	cloudeventsSentFromClientCounterMetric.With(labels).Inc()
+	CloudeventsSentFromClientCounterMetric.With(labels).Inc()
 }

-// updateResourceSpecResyncDurationMetric updates the resource spec resync duration metric:
-func updateResourceSpecResyncDurationMetric(source, consumer, dataType string, startTime time.Time) {
+// UpdateResourceSpecResyncDurationMetric updates the resource spec resync duration metric:
+func UpdateResourceSpecResyncDurationMetric(source, consumer, dataType string, startTime time.Time) {
 	labels := prometheus.Labels{
 		metricsSourceLabel:   source,
 		metricsConsumerLabel: consumer,
 		metricsDataTypeLabel: dataType,
 	}
 	duration := time.Since(startTime)
-	resourceSpecResyncDurationMetric.With(labels).Observe(duration.Seconds())
+	ResourceSpecResyncDurationMetric.With(labels).Observe(duration.Seconds())
 }

-// updateResourceStatusResyncDurationMetric updates the resource status resync duration metric:
-func updateResourceStatusResyncDurationMetric(source, consumer, dataType string, startTime time.Time) {
+// UpdateResourceStatusResyncDurationMetric updates the resource status resync duration metric:
+func UpdateResourceStatusResyncDurationMetric(source, consumer, dataType string, startTime time.Time) {
 	labels := prometheus.Labels{
 		metricsSourceLabel:   source,
 		metricsConsumerLabel: consumer,
 		metricsDataTypeLabel: dataType,
 	}
 	duration := time.Since(startTime)
-	resourceStatusResyncDurationMetric.With(labels).Observe(duration.Seconds())
+	ResourceStatusResyncDurationMetric.With(labels).Observe(duration.Seconds())
 }

-// increaseClientReconnectedCounter increases the client reconnected counter metric:
-func increaseClientReconnectedCounter(clientID string) {
+// IncreaseClientReconnectedCounter increases the client reconnected counter metric:
+func IncreaseClientReconnectedCounter(clientID string) {
 	labels := prometheus.Labels{
 		metricsClientIDLabel: clientID,
 	}
-	clientReconnectedCounterMetric.With(labels).Inc()
+	ClientReconnectedCounterMetric.With(labels).Inc()
 }

 // IncreaseWorkProcessedCounter increases the work processed counter metric:
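With the collectors and helper functions exported, consuming packages can register and drive these metrics on a caller-owned registry. A sketch under the assumption that the import path matches this commit's vendor layout:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	// Import path inferred from this commit's vendor layout; verify it against
	// the sdk-go version you depend on.
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics"
)

func main() {
	// The exported collectors can now be registered on a caller-owned registry
	// instead of relying on package-internal registration.
	reg := prometheus.NewRegistry()
	metrics.RegisterSourceCloudEventsMetrics(reg)

	// The helper funcs are exported too, so other packages (like the clients
	// package in this diff) record events through them.
	metrics.IncreaseClientReconnectedCounter("client1")

	// Only families with recorded samples show up in Gather; here that is the
	// reconnect counter incremented above.
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}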
@@ -1,4 +1,4 @@
-package generic
+package builder

 import (
 	"fmt"
@@ -6,8 +6,9 @@ import (
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/constants"
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options"
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc"
-	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka"
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt"
+	grpcv2 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/v2/grpc"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
 )

 // ConfigLoader loads a configuration object with a configuration file.
@@ -21,7 +22,6 @@ type ConfigLoader struct {
 // Available configuration types:
 // - mqtt
 // - grpc
-// - kafka
 func NewConfigLoader(configType, configPath string) *ConfigLoader {
 	return &ConfigLoader{
 		configType: configType,
@@ -46,49 +46,32 @@ func (l *ConfigLoader) LoadConfig() (string, any, error) {
 		}

 		return grpcOptions.Dialer.URL, grpcOptions, nil
-
-	case constants.ConfigTypeKafka:
-		kafkaOptions, err := kafka.BuildKafkaOptionsFromFlags(l.configPath)
-		if err != nil {
-			return "", nil, err
-		}
-		val, found := kafkaOptions.ConfigMap["bootstrap.servers"]
-		if found {
-			server, ok := val.(string)
-			if !ok {
-				return "", nil, fmt.Errorf("failed to get kafka bootstrap.servers from configMap")
-			}
-			return server, kafkaOptions, nil
-		}
-		return "", nil, fmt.Errorf("failed to get kafka bootstrap.servers from configMap")
 	}

 	return "", nil, fmt.Errorf("unsupported config type %s", l.configType)
 }

 // BuildCloudEventsSourceOptions builds the cloudevents source options based on the broker type
-func BuildCloudEventsSourceOptions(config any, clientId, sourceId string) (*options.CloudEventsSourceOptions, error) {
+func BuildCloudEventsSourceOptions(config any,
+	clientId, sourceId string, dataType types.CloudEventsDataType) (*options.CloudEventsSourceOptions, error) {
 	switch config := config.(type) {
 	case *mqtt.MQTTOptions:
 		return mqtt.NewSourceOptions(config, clientId, sourceId), nil
 	case *grpc.GRPCOptions:
-		return grpc.NewSourceOptions(config, sourceId), nil
-	case *kafka.KafkaOptions:
-		return kafka.NewSourceOptions(config, sourceId), nil
+		return grpcv2.NewSourceOptions(config, sourceId, dataType), nil
 	default:
 		return nil, fmt.Errorf("unsupported client configuration type %T", config)
 	}
 }

 // BuildCloudEventsAgentOptions builds the cloudevents agent options based on the broker type
-func BuildCloudEventsAgentOptions(config any, clusterName, clientId string) (*options.CloudEventsAgentOptions, error) {
+func BuildCloudEventsAgentOptions(config any,
+	clusterName, clientId string, dataType types.CloudEventsDataType) (*options.CloudEventsAgentOptions, error) {
 	switch config := config.(type) {
 	case *mqtt.MQTTOptions:
 		return mqtt.NewAgentOptions(config, clusterName, clientId), nil
 	case *grpc.GRPCOptions:
-		return grpc.NewAgentOptions(config, clusterName, clientId), nil
-	case *kafka.KafkaOptions:
-		return kafka.NewAgentOptions(config, clusterName, clientId), nil
+		return grpcv2.NewAgentOptions(config, clusterName, clientId, dataType), nil
 	default:
 		return nil, fmt.Errorf("unsupported client configuration type %T", config)
 	}
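Callers of the builder now supply the event data type up front, since the v2 gRPC options subscribe by data type at connect time. A hedged usage sketch; the builder import path, the "mqtt" literal (instead of the constants package), and the config path are all assumptions for illustration:

package main

import (
	"fmt"

	// Both import paths are assumptions based on the package renames in this
	// diff; adjust them to wherever your sdk-go version places these packages.
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/builder"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
)

func main() {
	// LoadConfig resolves the broker URL plus a typed options struct; only
	// mqtt and grpc remain now that the kafka branch is removed here.
	_, cfg, err := builder.NewConfigLoader("mqtt", "/path/to/config.yaml").LoadConfig()
	if err != nil {
		panic(err)
	}

	// The data type must be supplied up front so the v2 gRPC options can
	// subscribe by type at connect time.
	dataType := types.CloudEventsDataType{
		Group:    "io.open-cluster-management.works",
		Version:  "v1alpha1",
		Resource: "manifestbundles",
	}

	agentOpts, err := builder.BuildCloudEventsAgentOptions(cfg, "cluster1", "cluster1-agent", dataType)
	if err != nil {
		panic(err)
	}
	fmt.Println(agentOpts.ClusterName)
}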
@@ -60,10 +60,12 @@ func (c *clientCertRotating) run(ctx context.Context) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()

-	klog.V(3).Infof("Starting client certificate rotation controller")
-	defer klog.V(3).Infof("Shutting down client certificate rotation controller")
+	logger := klog.FromContext(ctx)

-	go wait.Until(c.runWorker, time.Second, ctx.Done())
+	logger.V(3).Info("Starting client certificate rotation controller")
+	defer logger.V(3).Info("Shutting down client certificate rotation controller")
+
+	go wait.UntilWithContext(ctx, c.runWorker, time.Second)

 	go func() {
 		if err := wait.PollUntilContextCancel(
@@ -82,19 +84,19 @@ func (c *clientCertRotating) run(ctx context.Context) {
 	<-ctx.Done()
 }

-func (c *clientCertRotating) runWorker() {
-	for c.processNextWorkItem() {
+func (c *clientCertRotating) runWorker(ctx context.Context) {
+	for c.processNextWorkItem(ctx) {
 	}
 }

-func (c *clientCertRotating) processNextWorkItem() bool {
+func (c *clientCertRotating) processNextWorkItem(ctx context.Context) bool {
 	dsKey, quit := c.queue.Get()
 	if quit {
 		return false
 	}
 	defer c.queue.Done(dsKey)

-	err := c.loadClientCert()
+	err := c.loadClientCert(ctx)
 	if err == nil {
 		c.queue.Forget(dsKey)
 		return true
@@ -107,7 +109,8 @@ func (c *clientCertRotating) processNextWorkItem() bool {
 }

 // loadClientCert calls the callback and rotates connections if needed
-func (c *clientCertRotating) loadClientCert() error {
+func (c *clientCertRotating) loadClientCert(ctx context.Context) error {
+	logger := klog.FromContext(ctx)
 	cert, err := c.reload(nil)
 	if err != nil {
 		return err
@@ -135,7 +138,7 @@ func (c *clientCertRotating) loadClientCert() error {
 		return fmt.Errorf("no connection close function set")
 	}

-	klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials")
+	logger.V(1).Info("certificate rotation detected, shutting down client connections to start using new credentials")
 	c.conn.Close()

 	return nil
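The rotation controller now threads its context through wait.UntilWithContext down to the per-item work, so a cancelled context stops the whole chain. A minimal self-contained sketch of that pattern; the worker body is a placeholder, not the controller's real logic:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// runWorker matches the signature wait.UntilWithContext expects,
// func(context.Context), and passes the ctx down to each item, mirroring
// what the hunk above does for loadClientCert.
func runWorker(ctx context.Context) {
	for processNextWorkItem(ctx) {
	}
}

func processNextWorkItem(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return false // stop draining once the context is cancelled
	default:
		fmt.Println("processing one item")
		time.Sleep(50 * time.Millisecond)
		return true
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()

	// wait.UntilWithContext re-runs runWorker every period until ctx is done.
	go wait.UntilWithContext(ctx, runWorker, time.Second)

	<-ctx.Done()
}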
@@ -11,30 +11,32 @@ import (
 	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
 )

-type grpcAgentOptions struct {
+type grpcAgentTransport struct {
 	GRPCOptions
-	errorChan   chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically
-	clusterName string
+	errorChan         chan error
+	clusterName       string
+	protocol          *protocol.Protocol
+	cloudEventsClient cloudevents.Client
+	dataType          types.CloudEventsDataType
 }

-func NewAgentOptions(grpcOptions *GRPCOptions, clusterName, agentID string) *options.CloudEventsAgentOptions {
+// Deprecated: use v2.grpc.NewAgentOptions instead
+func NewAgentOptions(grpcOptions *GRPCOptions,
+	clusterName, agentID string, dataType types.CloudEventsDataType) *options.CloudEventsAgentOptions {
 	return &options.CloudEventsAgentOptions{
-		CloudEventsOptions: &grpcAgentOptions{
+		CloudEventsTransport: &grpcAgentTransport{
 			GRPCOptions: *grpcOptions,
 			errorChan:   make(chan error),
 			clusterName: clusterName,
+			dataType:    dataType,
 		},
 		AgentID:     agentID,
 		ClusterName: clusterName,
 	}
 }

-func (o *grpcAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) {
-	// grpc agent client doesn't need to update topic in the context
-	return ctx, nil
-}
-
-func (o *grpcAgentOptions) Protocol(ctx context.Context, dataType types.CloudEventsDataType) (options.CloudEventsProtocol, error) {
+func (o *grpcAgentTransport) Connect(ctx context.Context) error {
+	logger := klog.FromContext(ctx)
 	opts := []protocol.Option{
 		protocol.WithSubscribeOption(&protocol.SubscribeOption{
 			// TODO: Update this code to determine the subscription source for the agent client.
@@ -42,7 +44,7 @@ func (o *grpcAgentOptions) Protocol(ctx context.Context, dataType types.CloudEve
 			// as a placeholder with all the sources.
 			Source:      types.SourceAll,
 			ClusterName: o.clusterName,
-			DataType:    dataType.String(),
+			DataType:    o.dataType.String(),
 		}),
 		protocol.WithReconnectErrorChan(o.errorChan),
 	}
@@ -57,17 +59,44 @@ func (o *grpcAgentOptions) Protocol(ctx context.Context, dataType types.CloudEve
 			select {
 			case o.errorChan <- err:
 			default:
-				klog.Errorf("no error channel available to report error: %v", err)
+				logger.Error(err, "no error channel available to report error")
 			}
 		},
 		opts...,
 	)
 	if err != nil {
-		return nil, err
+		return err
 	}
-	return receiver, nil
+
+	o.protocol = receiver
+	o.cloudEventsClient, err = cloudevents.NewClient(o.protocol)
+	if err != nil {
+		return err
+	}
+	return nil
 }

-func (o *grpcAgentOptions) ErrorChan() <-chan error {
+func (o *grpcAgentTransport) Send(ctx context.Context, evt cloudevents.Event) error {
+	if err := o.cloudEventsClient.Send(ctx, evt); cloudevents.IsUndelivered(err) {
+		return err
+	}
+	return nil
+}
+
+func (o *grpcAgentTransport) Subscribe(ctx context.Context) error {
+	// Subscription is handled by the cloudevents client during receiver startup.
+	// No action needed here.
+	return nil
+}
+
+func (o *grpcAgentTransport) Receive(ctx context.Context, fn options.ReceiveHandlerFn) error {
+	return o.cloudEventsClient.StartReceiver(ctx, fn)
+}
+
+func (o *grpcAgentTransport) Close(ctx context.Context) error {
+	return o.protocol.Close(ctx)
+}
+
+func (o *grpcAgentTransport) ErrorChan() <-chan error {
 	return o.errorChan
 }
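grpcAgentTransport now implements what appears to be a transport abstraction with Connect, Send, Subscribe, Receive, Close, and ErrorChan. A toy driver against a hand-rolled interface of the same shape; this interface and the fake implementation are stand-ins, not the sdk-go definitions:

package main

import (
	"context"
	"fmt"
	"time"
)

type event struct{ Type string }

// transport mirrors the method set implemented by grpcAgentTransport above;
// the real interface lives in sdk-go's options package.
type transport interface {
	Connect(ctx context.Context) error
	Subscribe(ctx context.Context) error
	Send(ctx context.Context, evt event) error
	Receive(ctx context.Context, fn func(ctx context.Context, evt event)) error
	Close(ctx context.Context) error
	ErrorChan() <-chan error
}

// run shows the expected call order: connect, subscribe, then block in
// Receive until the context ends, watching ErrorChan for reconnect errors.
func run(ctx context.Context, t transport) error {
	if err := t.Connect(ctx); err != nil {
		return err
	}
	defer t.Close(ctx)

	if err := t.Subscribe(ctx); err != nil {
		return err
	}

	go func() {
		for err := range t.ErrorChan() {
			fmt.Println("transport error (reconnect expected):", err)
		}
	}()

	return t.Receive(ctx, func(ctx context.Context, evt event) {
		fmt.Println("received", evt.Type)
	})
}

// fakeTransport is an in-memory stand-in that delivers one event and then
// waits for cancellation.
type fakeTransport struct{ errs chan error }

func (f *fakeTransport) Connect(ctx context.Context) error       { return nil }
func (f *fakeTransport) Subscribe(ctx context.Context) error     { return nil }
func (f *fakeTransport) Send(ctx context.Context, e event) error { return nil }
func (f *fakeTransport) Close(ctx context.Context) error         { close(f.errs); return nil }
func (f *fakeTransport) ErrorChan() <-chan error                 { return f.errs }
func (f *fakeTransport) Receive(ctx context.Context, fn func(context.Context, event)) error {
	fn(ctx, event{Type: "io.example.spec.create"})
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	_ = run(ctx, &fakeTransport{errs: make(chan error)})
}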
Some files were not shown because too many files have changed in this diff.