Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits
96 Commits
bcbaad8495 3346319082 139aa43c1c 11fdd40e41 c4ce86cb0b dfe7e9b3ca
1c29bfc084 c48e2bb8bb df40b5d02e aa26a2222b f9d1a967c7 b2e1d3f0dd
24478a9dd4 160525bd1f d9158ab602 2b4cc64026 ccd7dcb867 5c5c555a7f
273e4768f3 69e359e9fc e5352df348 f2b4e8e6c6 99a38bff8e d0aa627715
953cbe9d28 f7873aba7b f9728ecfff 96a44153de cafbcbd2cb 6397a35e32
aea8592880 2aa514a34c ac39bc4eba 284d21686e 00c0c11c76 96ebfa8e62
95d442d80f e4e58882ab ea71fc0eec 462b225d92 d8728092f8 2c8ef70c43
4d2c8a451e f7927c85b1 2e2fd2a11b 0e6ec1d36b 85b33d9104 c838ecbbc7
068a5c1e64 4d559a1864 322142dd66 39f37b706c 4e10dd4f80 ccaa600ff4
a3fcfeb62f d2cbbafeb1 eaf8e16414 5a65cf9f6d a8a68ae1b0 7643a27fb1
71fdb53c2e d6312f6f83 19220f5e6e 05456b0905 10328dee8d fd174ed691
2e47f1740c 15cb96f945 1e987db54d 12a7fed3ae f18fac66c2 b5c95f9cbf
46b948388f 78be58b090 54a8e0683b 702f0caa93 2e709e85ae debfd57a91
c3b8af34ac 7a65bcb35b af6cd9e37c 344004d0b3 a5bc586f09 81ca7ab601
69c9ccb2ea 0ec3effab8 dba42e91bc 68fd3bebe5 52b975ef0d 0679af76f4
309c10f632 07ddec9fd1 69a80fd1d9 04975de060 459a808371 ef8a335c93

.github/workflows/pull_request.yaml (vendored, 18 changed lines)

```diff
@@ -19,7 +19,9 @@ jobs:
     steps:
     - name: Check out code
       uses: actions/checkout@v2
+      with:
+        ref: ${{github.event.pull_request.head.sha}}

     # Setting up helm binary
     - name: Set up Helm
       uses: azure/setup-helm@v1
@@ -34,10 +36,12 @@ jobs:
       run: |
        make install

-    - name: Lint
-      run: |
-        curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
-        golangci-lint run --timeout=10m ./...
+    - name: Run golangci-lint
+      uses: golangci/golangci-lint-action@v2.3.0
+      with:
+        version: v1.33
+        only-new-issues: false
+        args: --timeout 10m

     - name: Helm Lint
       run: |
@@ -111,7 +115,7 @@ jobs:
       env:
         GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
       with:
-        message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
+        message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
        allow-repeats: false

     - name: Notify Failure
@@ -131,4 +135,4 @@ jobs:
         fields: repo,author,action,eventName,ref,workflow
       env:
         GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
-        SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
+        SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```

.github/workflows/push.yaml (vendored, 17 changed lines)

```diff
@@ -39,11 +39,13 @@ jobs:
       run: |
        make install

-    - name: Lint
-      run: |
-        curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
-        golangci-lint run --timeout=10m ./...
+    - name: Run golangci-lint
+      uses: golangci/golangci-lint-action@v2.3.0
+      with:
+        version: v1.33
+        only-new-issues: false
+        args: --timeout 10m

     - name: Install kubectl
       run: |
        curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
@@ -127,6 +129,11 @@ jobs:
         VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
       run: make bump-chart

+    - name: Helm Template
+      run: |
+        helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
+        helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
+
     # Publish helm chart
     - name: Publish Helm chart
       uses: stefanprodan/helm-gh-pages@master
```

.gitignore (vendored, 3 changed lines)

```diff
@@ -8,4 +8,5 @@ _gopath/
 .DS_Store
 .vscode
 vendor
-dist
+dist
+Reloader
```

Dockerfile (file header lost in extraction; this is the root Dockerfile the Makefile below builds with `-f Dockerfile .`)

```diff
@@ -1,5 +1,8 @@
 # Build the manager binary
-FROM golang:1.15.2 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.16 as builder
+
+ARG TARGETOS
+ARG TARGETARCH

 WORKDIR /workspace
@@ -16,14 +19,14 @@ COPY internal/ internal/
 COPY pkg/ pkg/

 # Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=mod -a -o manager main.go
+RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go

 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
 FROM gcr.io/distroless/static:nonroot
 WORKDIR /
 COPY --from=builder /workspace/manager .
-USER nonroot:nonroot
+USER 65532:65532

 # Port for metrics and probes
 EXPOSE 9090
```

Makefile (44 changed lines)

```diff
@@ -1,6 +1,6 @@
 # note: call scripts from /scripts

-.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
+.PHONY: default build build-image test stop push apply deploy release release-all manifest push

 OS ?= linux
 ARCH ?= ???
@@ -9,12 +9,12 @@ ALL_ARCH ?= arm64 arm amd64
 BUILDER ?= reloader-builder-${ARCH}
 BINARY ?= Reloader
 DOCKER_IMAGE ?= stakater/reloader
-# Default value "dev"
-TAG ?= v0.0.75.0
-REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
-REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
+# Default value "dev"
+VERSION ?= 0.0.1
+
+REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
+REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
 BUILD=

 GOCMD = go
@@ -26,23 +26,19 @@ default: build test
 install:
 	"$(GOCMD)" mod download

 run:
 	go run ./main.go

 build:
 	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"

-builder-image:
-	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
-
-reloader-${ARCH}.tar:
-	docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
-
-binary-image: builder-image
-	cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
+build-image:
+	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${REPOSITORY_ARCH}" --load -f Dockerfile .

 push:
 	docker push ${REPOSITORY_ARCH}

-release: binary-image push manifest
+release: build-image push manifest

 release-all:
 	-rm -rf ~/.docker/manifests/*
@@ -66,23 +62,6 @@ test:
 stop:
 	@docker stop "${BINARY}"

-clean-images: stop
-	-docker rmi "${BINARY}"
-	@for arch in $(ALL_ARCH) ; do \
-		echo Clean image: $$arch ; \
-		make clean-image ARCH=$$arch ; \
-	done
-	-docker rmi "${REPOSITORY_GENERIC}"
-
-clean-image:
-	-docker rmi "${BUILDER}"
-	-docker rmi "${REPOSITORY_ARCH}"
-	-rm -rf ~/.docker/manifests/*
-
 clean:
 	"$(GOCMD)" clean -i
 	-rm -rf reloader-*.tar

 apply:
 	kubectl apply -f deployments/manifests/ -n temp-reloader
@@ -93,3 +72,4 @@ bump-chart:
 	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
 	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
 	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
+	sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
```

README.md (31 changed lines)

````diff
@@ -99,7 +99,8 @@ metadata:
   annotations:
     configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

 ### Secret
@@ -114,7 +115,8 @@ metadata:
   annotations:
     secret.reloader.stakater.com/reload: "foo-secret"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

 Use comma separated list to define multiple secrets.
@@ -125,7 +127,8 @@ metadata:
   annotations:
     secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

 ### NOTES
@@ -142,6 +145,19 @@ spec:
 - you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
+- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
+
+## Reload Strategies
+Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:
+- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers
+  referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.).
+  This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
+- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation
+  on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools
+  to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its
+  `configMap` or `secret` be deleted and recreated.
+  This strategy can be specified with the `--reload-strategy=annotations` argument.
+

 ## Deploying to Kubernetes

@@ -219,7 +235,14 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by

 You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart

-You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
+You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
+
+**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
+
+| Parameter      | Description                                                                  | Type    |
+| -------------- | ---------------------------------------------------------------------------- | ------- |
+| isOpenshift    | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean |
+| isArgoRollouts | Enable Argo Rollouts. Valid value are either `true` or `false`               | boolean |

 ## Help
````
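
The `annotations` strategy in the README text above marks the pod template instead of injecting an environment variable. Below is a minimal sketch of what a reloaded `Deployment` could look like under `--reload-strategy=annotations`. The annotation key is composed from the constants introduced later in this range; the JSON value shown is purely illustrative (the real payload is built by `util.NewReloadSourceFromConfig` and is informational only):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: foo
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
  template:
    metadata:
      annotations:
        # Written by Reloader; tracks only the most recent reload source (value illustrative)
        reloader.stakater.com/last-reloaded-from: '{"type": "CONFIGMAP", "name": "foo-configmap"}'
```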
@@ -1,26 +0,0 @@
|
||||
FROM golang:1.15.2-alpine
|
||||
LABEL maintainer "Stakater Team"
|
||||
|
||||
ARG GOARCH=amd64
|
||||
|
||||
RUN apk -v --update \
|
||||
--no-cache \
|
||||
add git build-base
|
||||
|
||||
WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
|
||||
|
||||
COPY go.mod go.sum ./
|
||||
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
|
||||
|
||||
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
|
||||
|
||||
COPY build/package/Dockerfile.run /
|
||||
|
||||
# Running this image produces a tarball suitable to be piped into another
|
||||
# Docker build command.
|
||||
CMD tar -cf - -C / Dockerfile.run Reloader
|
||||

build/package/Dockerfile.run (deleted; path inferred from the file above, which copies it to /)

```diff
@@ -1,14 +0,0 @@
-FROM alpine:3.11
-LABEL maintainer "Stakater Team"
-
-RUN apk add --update --no-cache ca-certificates
-
-COPY Reloader /bin/Reloader
-
-# On alpine 'nobody' has uid 65534
-USER 65534
-
-# Port for metrics and probes
-EXPOSE 9090
-
-ENTRYPOINT ["/bin/Reloader"]
```

deployments/kubernetes/chart/reloader/Chart.yaml

```diff
@@ -3,14 +3,14 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: v0.0.83
-appVersion: v0.0.83
+version: v0.0.103
+appVersion: v0.0.103
 keywords:
 - Reloader
 - kubernetes
 home: https://github.com/stakater/Reloader
 sources:
 - https://github.com/stakater/IngressMonitorController
 - https://github.com/stakater/Reloader
 icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
 maintainers:
 - name: Stakater
```

Chart templates/NOTES.txt (filename inferred from content; header lost in extraction)

```diff
@@ -1,7 +1,7 @@
 - For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
-  configmap.reloader.stakater.com/reload: "foo-configmap"
+  {{ .Values.reloader.custom_annotations.configmap | default "configmap.reloader.stakater.com/reload" }}: "foo-configmap"

-- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
-  secret.reloader.stakater.com/reload: "foo-secret"
+- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
+  {{ .Values.reloader.custom_annotations.secret | default "secret.reloader.stakater.com/reload" }}: "foo-secret"

 - After successful installation, your pods will get rolling updates when a change in data of configmap or secret will happen.
```

A chart RBAC template (clusterrole or role; exact filename lost in extraction)

```diff
@@ -32,7 +32,7 @@ rules:
   - list
   - get
   - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
 - apiGroups:
   - "apps.openshift.io"
   - ""
@@ -43,6 +43,18 @@ rules:
   - get
   - update
   - patch
 {{- end }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+- apiGroups:
+  - "argoproj.io"
+  - ""
+  resources:
+  - rollouts
+  verbs:
+  - list
+  - get
+  - update
+  - patch
+{{- end }}
 - apiGroups:
   - "apps"
```

Chart templates/deployment.yaml (filename inferred from content; header lost in extraction)

```diff
@@ -51,6 +51,9 @@ spec:
       {{- if .Values.reloader.deployment.tolerations }}
       tolerations:
 {{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
       {{- end }}
+      {{- if .Values.reloader.deployment.priorityClassName }}
+      priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
+      {{- end }}
       containers:
       - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
@@ -97,17 +100,25 @@ spec:
           httpGet:
             path: /metrics
             port: http
+          timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
+          failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
+          periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
+          successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
         readinessProbe:
           httpGet:
             path: /metrics
             port: http
+          timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
+          failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
+          periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
+          successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}

         {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
         volumeMounts:
           - mountPath: /tmp/
             name: tmp-volume
         {{- end }}
-        {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
+        {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (ne .Values.reloader.reloadStrategy "default")}}
         args:
           {{- if .Values.reloader.logFormat }}
           - "--log-format={{ .Values.reloader.logFormat }}"
@@ -144,6 +155,12 @@ spec:
           - "{{ .Values.reloader.custom_annotations.match }}"
           {{- end }}
           {{- end }}
+          {{- if eq .Values.reloader.isArgoRollouts true }}
+          - "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
+          {{- end }}
+          {{- if ne .Values.reloader.reloadStrategy "default" }}
+          - "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
+          {{- end }}
         {{- end }}
         {{- if .Values.reloader.deployment.resources }}
         resources:
```
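
When `reloadStrategy` is set to anything other than `default`, or `isArgoRollouts` is enabled, the template above appends extra container arguments. A sketch of roughly what the rendered container section might look like with `reloadStrategy: annotations` and `isArgoRollouts: true` (values illustrative, other fields omitted):

```yaml
# Hypothetical rendered output of the deployment template above
containers:
  - image: "stakater/reloader:v0.0.103"
    imagePullPolicy: IfNotPresent
    args:
      - "--is-Argo-Rollouts=true"
      - "--reload-strategy=annotations"
```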

Chart templates/podmonitor.yaml (new file, 31 lines; filename inferred from the manifests entry below)

```diff
@@ -0,0 +1,31 @@
+{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.podMonitor.labels }}
+{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
+{{- end }}
+  name: {{ template "reloader-fullname" . }}
+{{- if .Values.reloader.podMonitor.namespace }}
+  namespace: {{ .Values.reloader.podMonitor.namespace }}
+{{- end }}
+spec:
+  podMetricsEndpoints:
+  - port: http
+    path: "/metrics"
+{{- if .Values.reloader.podMonitor.interval }}
+    interval: {{ .Values.reloader.podMonitor.interval }}
+{{- end }}
+{{- if .Values.reloader.podMonitor.timeout }}
+    scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
+{{- end }}
+  jobLabel: {{ template "reloader-fullname" . }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+{{ include "reloader-labels.chart" . | nindent 6 }}
+{{- end }}
```

A second chart RBAC template with the same change (role or clusterrole; exact filename lost in extraction)

```diff
@@ -32,7 +32,7 @@ rules:
   - list
   - get
   - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
 - apiGroups:
   - "apps.openshift.io"
   - ""
@@ -43,6 +43,18 @@ rules:
   - get
   - update
   - patch
 {{- end }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+- apiGroups:
+  - "argoproj.io"
+  - ""
+  resources:
+  - rollouts
+  verbs:
+  - list
+  - get
+  - update
+  - patch
+{{- end }}
 - apiGroups:
   - "apps"
```

deployments/kubernetes/chart/reloader/values.schema.json (new file, 19 lines)

```diff
@@ -0,0 +1,19 @@
+{
+  "$schema": "http://json-schema.org/schema#",
+  "type": "object",
+  "properties": {
+    "reloader": {
+      "type": "object",
+      "properties": {
+        "reloadStrategy": {
+          "type": "string",
+          "enum": [
+            "default",
+            "env-vars",
+            "annotations"
+          ]
+        }
+      }
+    }
+  }
+}
```
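
Because Helm validates values against `values.schema.json` at lint/install/upgrade time, a misspelled strategy now fails fast instead of silently falling through the template conditionals. For example, a hypothetical override like the following would be rejected:

```yaml
# Invalid: fails chart schema validation; "rolling" is not one of default, env-vars, annotations
reloader:
  reloadStrategy: rolling
```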

deployments/kubernetes/chart/reloader/values.yaml

```diff
@@ -9,9 +9,11 @@ kubernetes:
   host: https://kubernetes.default

 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
+  reloadStrategy: default # Set to default, env-vars or annotations
   ignoreNamespaces: "" # Comma separated list of namespaces to ignore
   logFormat: "" #json
   watchGlobally: true
@@ -51,10 +53,10 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v0.0.77
+      version: v0.0.103
     image:
       name: stakater/reloader
-      tag: v0.0.83
+      tag: v0.0.103
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -65,6 +67,18 @@ reloader:
       # field supports Key value pair as environment variables. It gets the values from other fields of pod.
       field:

+    # Liveness and readiness probe timeout values.
+    livenessProbe: {}
+      # timeoutSeconds: 5
+      # failureThreshold: 5
+      # periodSeconds: 10
+      # successThreshold: 1
+    readinessProbe: {}
+      # timeoutSeconds: 15
+      # failureThreshold: 5
+      # periodSeconds: 10
+      # successThreshold: 1
+
     # Specify resource requests/limits for the deployment.
     # Example:
     # resources:
@@ -77,6 +91,7 @@ reloader:
     resources: {}
     pod:
       annotations: {}
+    priorityClassName: ""

   service: {}
   # labels: {}
@@ -94,15 +109,17 @@ reloader:
     annotations: {}
     # The name of the ServiceAccount to use.
     # If not set and create is true, a name is generated using the fullname template
-    name:
+    name:
   # Optional flags to pass to the Reloader entrypoint
   # Example:
   # custom_annotations:
   #   configmap: "my.company.com/configmap"
   #   secret: "my.company.com/secret"
   custom_annotations: {}

   serviceMonitor:
-    # enabling this requires service to be enabled as well, or no endpoints will be found
+    # Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
+    # Enabling this requires service to be enabled as well, or no endpoints will be found
     enabled: false
     # Set the namespace the ServiceMonitor should be deployed
     # namespace: monitoring
@@ -112,4 +129,14 @@ reloader:
     # labels:
     # Set timeout for scrape
     # timeout: 10s
+
+  podMonitor:
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
```
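
The new chart options can be combined in a small override file. A sketch using only keys introduced in this range (the file name `my-values.yaml` is hypothetical); it could be applied with `helm upgrade --install reloader deployments/kubernetes/chart/reloader/ -f my-values.yaml`:

```yaml
# my-values.yaml (hypothetical): exercises the options added in this range
reloader:
  reloadStrategy: annotations   # one of: default, env-vars, annotations
  isArgoRollouts: true          # rollout RBAC renders only when argoproj.io/v1alpha1 is present
  podMonitor:
    enabled: true               # intended to replace the deprecated serviceMonitor
    namespace: monitoring
    interval: 30s
    timeout: 10s
```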

deployments/kubernetes/manifests/clusterrole.yaml

```diff
@@ -1,7 +1,7 @@
 ---
 # Source: reloader/templates/clusterrole.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
   annotations:
@@ -9,10 +9,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role
   namespace: default
 rules:
@@ -46,4 +46,3 @@ rules:
   - get
   - update
   - patch
-
```

deployments/kubernetes/manifests/clusterrolebinding.yaml

```diff
@@ -1,7 +1,7 @@
 ---
 # Source: reloader/templates/clusterrolebinding.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   annotations:
@@ -9,10 +9,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
@@ -23,4 +23,3 @@ subjects:
 - kind: ServiceAccount
   name: reloader-reloader
   namespace: default
-
```

deployments/kubernetes/manifests/deployment.yaml

```diff
@@ -8,14 +8,13 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.77
-
+    version: v0.0.103
   name: reloader-reloader
 spec:
   replicas: 1
@@ -28,17 +27,16 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.77"
+        chart: "reloader-v0.0.103"
         release: "reloader"
-        heritage: "Tiller"
-        app.kubernetes.io/managed-by: "Tiller"
+        heritage: "Helm"
+        app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.77
-
+        version: v0.0.103
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.77"
+      - image: "stakater/reloader:v0.0.103"
         imagePullPolicy: IfNotPresent
         name: reloader-reloader
@@ -49,13 +47,19 @@ spec:
           httpGet:
             path: /metrics
             port: http
+          timeoutSeconds: 5
+          failureThreshold: 5
+          periodSeconds: 10
+          successThreshold: 1
         readinessProbe:
           httpGet:
             path: /metrics
             port: http
+          timeoutSeconds: 5
+          failureThreshold: 5
+          periodSeconds: 10
+          successThreshold: 1
         securityContext:
           runAsNonRoot: true
           runAsUser: 65534
-
       serviceAccountName: reloader-reloader
```

deployments/kubernetes/manifests/podmonitor.yaml (new file, 3 lines; only the source banner renders while `podMonitor.enabled` is false)

```diff
@@ -0,0 +1,3 @@
+---
+# Source: reloader/templates/podmonitor.yaml
+
```

deployments/kubernetes/manifests/serviceaccount.yaml

```diff
@@ -1,6 +1,5 @@
 ---
 # Source: reloader/templates/serviceaccount.yaml
-
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -9,9 +8,8 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader
```

deployments/kubernetes/reloader.yaml (filename inferred from the Helm Template step in push.yaml above)

```diff
@@ -1,7 +1,22 @@
 ---
+# Source: reloader/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
+  labels:
+    app: reloader-reloader
+    chart: "reloader-v0.0.103"
+    release: "reloader"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
+  name: reloader-reloader
+---
 # Source: reloader/templates/clusterrole.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
   annotations:
@@ -9,10 +24,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role
   namespace: default
 rules:
@@ -46,11 +61,10 @@ rules:
   - get
   - update
   - patch
-
 ---
 # Source: reloader/templates/clusterrolebinding.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   annotations:
@@ -58,10 +72,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
@@ -72,7 +86,6 @@ subjects:
 - kind: ServiceAccount
   name: reloader-reloader
   namespace: default
-
 ---
 # Source: reloader/templates/deployment.yaml
 apiVersion: apps/v1
@@ -83,14 +96,13 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.103"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.77
-
+    version: v0.0.103
   name: reloader-reloader
 spec:
   replicas: 1
@@ -103,17 +115,16 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.77"
+        chart: "reloader-v0.0.103"
         release: "reloader"
-        heritage: "Tiller"
-        app.kubernetes.io/managed-by: "Tiller"
+        heritage: "Helm"
+        app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.77
-
+        version: v0.0.103
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.77"
+      - image: "stakater/reloader:v0.0.103"
         imagePullPolicy: IfNotPresent
         name: reloader-reloader
@@ -124,46 +135,19 @@ spec:
           httpGet:
             path: /metrics
             port: http
+          timeoutSeconds: 5
+          failureThreshold: 5
+          periodSeconds: 10
+          successThreshold: 1
         readinessProbe:
           httpGet:
             path: /metrics
             port: http
+          timeoutSeconds: 5
+          failureThreshold: 5
+          periodSeconds: 10
+          successThreshold: 1
         securityContext:
           runAsNonRoot: true
           runAsUser: 65534
-
       serviceAccountName: reloader-reloader
-
----
-# Source: reloader/templates/role.yaml
-
----
-# Source: reloader/templates/rolebinding.yaml
-
----
-# Source: reloader/templates/service.yaml
-
----
-# Source: reloader/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  annotations:
-    meta.helm.sh/release-namespace: "default"
-    meta.helm.sh/release-name: "reloader"
-  labels:
-    app: reloader-reloader
-    chart: "reloader-v0.0.77"
-    release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
-  name: reloader-reloader
----
-# Source: reloader/templates/servicemonitor.yaml
```

A further values.yaml diff (its file header was lost in extraction)

```diff
@@ -9,6 +9,7 @@ kubernetes:
   host: https://kubernetes.default

 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
@@ -113,3 +114,15 @@ reloader:
     # Set timeout for scrape
     # timeout: 10s
+
+  podMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
```

go.mod (62 changed lines)

```diff
@@ -1,27 +1,49 @@
 module github.com/stakater/Reloader

-go 1.15
+go 1.16

 require (
-	github.com/argoproj/argo-rollouts v0.7.2
-	github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/onsi/ginkgo v1.10.2 // indirect
-	github.com/onsi/gomega v1.7.0 // indirect
-	github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
-	github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
-	github.com/prometheus/client_golang v1.4.1
-	github.com/sirupsen/logrus v1.4.2
-	github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
-	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
-	k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
-	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
+	github.com/argoproj/argo-rollouts v1.0.2
+	github.com/onsi/ginkgo v1.15.1 // indirect
+	github.com/onsi/gomega v1.11.0 // indirect
+	github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
+	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
+	github.com/prometheus/client_golang v1.10.0
+	github.com/sirupsen/logrus v1.7.0
+	github.com/spf13/cobra v1.1.3
+	k8s.io/api v0.21.2
+	k8s.io/apimachinery v0.21.2
+	k8s.io/client-go v0.21.2
 )

+// Replacements for argo-rollouts
 replace (
-	github.com/openshift/api => github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible // prebase-1.16
-	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 // prebase-1.16
-	k8s.io/api => k8s.io/api v0.0.0-20191004120104-195af9ec3521 // release-1.16
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 // kubernetes-1.16.0
-	k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0
+	github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
+	github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
+	k8s.io/api => k8s.io/api v0.20.4
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
+	k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
+	k8s.io/apiserver => k8s.io/apiserver v0.20.4
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
+	k8s.io/client-go => k8s.io/client-go v0.20.4
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
+	k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
+	k8s.io/component-base => k8s.io/component-base v0.20.4
+	k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
+	k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
+	k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
+	k8s.io/kubectl => k8s.io/kubectl v0.20.4
+	k8s.io/kubelet => k8s.io/kubelet v0.20.4
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
+	k8s.io/metrics => k8s.io/metrics v0.20.4
+	k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.4
+	k8s.io/sample-controller => k8s.io/sample-controller v0.20.4
 )
```

internal/pkg/callbacks (rolling-upgrade callbacks; file header lost in extraction)

```diff
@@ -1,6 +1,7 @@
 package callbacks

 import (
+	"context"
 	"github.com/sirupsen/logrus"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
@@ -47,46 +48,87 @@ type RollingUpgradeFuncs struct {

 // GetDeploymentItems returns the deployments in given namespace
 func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
-	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(meta_v1.ListOptions{})
+	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list deployments %v", err)
 	}
+
+	// Ensure we always have pod annotations to add to
+	for i, v := range deployments.Items {
+		if v.Spec.Template.ObjectMeta.Annotations == nil {
+			annotations := make(map[string]string)
+			deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
+		}
+	}
+
 	return util.InterfaceSlice(deployments.Items)
 }

 // GetDaemonSetItems returns the daemonSets in given namespace
 func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
-	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(meta_v1.ListOptions{})
+	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list daemonSets %v", err)
 	}
+
+	// Ensure we always have pod annotations to add to
+	for i, v := range daemonSets.Items {
+		if v.Spec.Template.ObjectMeta.Annotations == nil {
+			daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+	}
+
 	return util.InterfaceSlice(daemonSets.Items)
 }

 // GetStatefulSetItems returns the statefulSets in given namespace
 func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
-	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(meta_v1.ListOptions{})
+	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list statefulSets %v", err)
 	}
+
+	// Ensure we always have pod annotations to add to
+	for i, v := range statefulSets.Items {
+		if v.Spec.Template.ObjectMeta.Annotations == nil {
+			statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+	}
+
 	return util.InterfaceSlice(statefulSets.Items)
 }

 // GetDeploymentConfigItems returns the deploymentConfigs in given namespace
 func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
-	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
+	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list deploymentConfigs %v", err)
 	}
+
+	// Ensure we always have pod annotations to add to
+	for i, v := range deploymentConfigs.Items {
+		if v.Spec.Template.ObjectMeta.Annotations == nil {
+			deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+	}
+
 	return util.InterfaceSlice(deploymentConfigs.Items)
 }

 // GetRolloutItems returns the rollouts in given namespace
 func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
-	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(meta_v1.ListOptions{})
+	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list Rollouts %v", err)
 	}
+
+	// Ensure we always have pod annotations to add to
+	for i, v := range rollouts.Items {
+		if v.Spec.Template.ObjectMeta.Annotations == nil {
+			rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		}
+	}
+
 	return util.InterfaceSlice(rollouts.Items)
 }
@@ -193,38 +235,38 @@ func GetRolloutInitContainers(item interface{}) []v1.Container {
 // UpdateDeployment performs rolling upgrade on deployment
 func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
 	deployment := resource.(appsv1.Deployment)
-	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(&deployment)
+	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }

 // UpdateDaemonSet performs rolling upgrade on daemonSet
 func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
 	daemonSet := resource.(appsv1.DaemonSet)
-	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(&daemonSet)
+	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }

 // UpdateStatefulSet performs rolling upgrade on statefulSet
 func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
 	statefulSet := resource.(appsv1.StatefulSet)
-	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
+	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }

 // UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
 func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
 	deploymentConfig := resource.(openshiftv1.DeploymentConfig)
-	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
+	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }

 // UpdateRollout performs rolling upgrade on rollout
 func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
 	rollout := resource.(argorolloutv1alpha1.Rollout)
-	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(rollout.Name, meta_v1.GetOptions{})
+	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
 	logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
 	logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
-	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(&rollout)
+	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }
```

internal/pkg/cmd (NewReloaderCommand; file header lost in extraction)

```diff
@@ -3,7 +3,9 @@ package cmd
 import (
 	"errors"
+	"fmt"
+	"github.com/stakater/Reloader/internal/pkg/constants"
 	"os"
 	"strings"

 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
@@ -18,9 +20,10 @@
 // NewReloaderCommand starts the reloader controller
 func NewReloaderCommand() *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "reloader",
-		Short: "A watcher for your Kubernetes cluster",
-		Run:   startReloader,
+		Use:     "reloader",
+		Short:   "A watcher for your Kubernetes cluster",
+		PreRunE: validateFlags,
+		Run:     startReloader,
 	}

 	// options
@@ -32,9 +35,25 @@ func NewReloaderCommand() *cobra.Command {
 	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
 	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
 	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
 	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
+	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")

 	return cmd
 }

+func validateFlags(*cobra.Command, []string) error {
+	// Ensure the reload strategy is one of the following...
+	valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
+	for _, s := range valid {
+		if s == options.ReloadStrategy {
+			return nil
+		}
+	}
+
+	err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
+	return errors.New(err)
+}
+
 func configureLogging(logFormat string) error {
 	switch logFormat {
 	case "json":
```

internal/pkg/constants (file header lost in extraction)

```diff
@@ -7,4 +7,16 @@ const (
 	SecretEnvVarPostfix = "SECRET"
 	// EnvVarPrefix is a Prefix for environment variable
 	EnvVarPrefix = "STAKATER_"
+
+	// ReloaderAnnotationPrefix is a Prefix for all reloader annotations
+	ReloaderAnnotationPrefix = "reloader.stakater.com"
+	// LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload
+	LastReloadedFromAnnotation = "last-reloaded-from"
+
+	// ReloadStrategyFlag The reload strategy flag name
+	ReloadStrategyFlag = "reload-strategy"
+	// EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart
+	EnvVarsReloadStrategy = "env-vars"
+	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
+	AnnotationsReloadStrategy = "annotations"
 )
```

internal/pkg/controller (file header lost in extraction)

```diff
@@ -30,6 +30,9 @@
 	collectors metrics.Collectors
 }

+// controllerInitialized flag determines whether controlled is being initialized
+var controllerInitialized bool = false
+
 // NewController for initializing a Controller
 func NewController(
 	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
@@ -57,8 +60,12 @@
 // Add function to add a new object to the queue in case of creating a resource
 func (c *Controller) Add(obj interface{}) {
-	if !c.resourceInIgnoredNamespace(obj) {
+	// Not required as reloader should update the resource in the event of any change and not in the event of any resource creation.
+	// This causes the issue where reloader reloads the pods when reloader itself gets restarted as it's queue is filled with all the k8s objects as new resources.
+	if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
 		c.queue.Add(handler.ResourceCreatedHandler{
 			Resource:   obj,
 			Collectors: c.collectors,
 		})
 	}
 }

 func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
@@ -111,6 +118,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
 }

 func (c *Controller) runWorker() {
+	// At this point the controller is fully initialized and we can start processing the resources
+	controllerInitialized = true
+
 	for c.processNextItem() {
 	}
 }
@@ -145,7 +155,7 @@ func (c *Controller) handleErr(err error, key interface{}) {

 	// This controller retries 5 times if something goes wrong. After that, it stops trying.
 	if c.queue.NumRequeues(key) < 5 {
-		logrus.Errorf("Error syncing events %v: %v", key, err)
+		logrus.Errorf("Error syncing events: %v", err)

 		// Re-enqueue the key rate limited. Based on the rate limiter on the
 		// queue and the re-enqueue history, the key will be processed later again.
```

(One file's diff is suppressed in the mirror because it is too large.)

internal/pkg/handler (resource-created handler; file header lost in extraction)

```diff
@@ -20,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
 	} else {
 		config, _ := r.GetConfig()
 		// process resource based on its type
-		doRollingUpgrade(config, r.Collectors)
+		return doRollingUpgrade(config, r.Collectors)
 	}
 	return nil
 }
```

internal/pkg/handler (resource-updated handler; file header lost in extraction)

```diff
@@ -22,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
 		config, oldSHAData := r.GetConfig()
 		if config.SHAValue != oldSHAData {
 			// process resource based on its type
-			doRollingUpgrade(config, r.Collectors)
+			return doRollingUpgrade(config, r.Collectors)
 		}
 	}
 	return nil
```
@@ -1,9 +1,9 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
|
||||
@@ -85,32 +87,52 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
}
|
||||
}
|
||||
|
||||
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) {
|
||||
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
|
||||
clients := kube.GetClients()
|
||||
|
||||
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
|
||||
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
|
||||
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
|
||||
|
||||
if kube.IsOpenshift {
|
||||
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
|
||||
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
|
||||
if kube.IsOpenshift {
|
||||
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if options.IsArgoRollouts == "true" {
|
||||
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) {
|
||||
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
|
||||
|
||||
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
|
||||
if err != nil {
|
||||
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
|
||||
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
|
||||
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
|
||||
var err error
|
||||
|
||||
for _, i := range items {
|
||||
// find correct annotation and update the resource
|
||||
annotations := upgradeFuncs.AnnotationsFunc(i)
|
||||
@@ -126,7 +148,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
|
||||
result := constants.NotUpdated
|
||||
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
|
||||
if err == nil && reloaderEnabled {
|
||||
result = updateContainers(upgradeFuncs, i, config, true)
|
||||
result = invokeReloadStrategy(upgradeFuncs, i, config, true)
|
||||
}
|
||||
|
||||
if result != constants.Updated && annotationValue != "" {
|
||||
@@ -134,7 +156,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
|
||||
for _, value := range values {
|
||||
value = strings.Trim(value, " ")
|
||||
if value == config.ResourceName {
|
||||
result = updateContainers(upgradeFuncs, i, config, false)
|
||||
result = invokeReloadStrategy(upgradeFuncs, i, config, false)
|
||||
if result == constants.Updated {
|
||||
break
|
||||
}
|
||||
@@ -145,7 +167,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
|
||||
if result != constants.Updated && searchAnnotationValue == "true" {
|
||||
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
|
||||
if matchAnnotationValue == "true" {
|
||||
result = updateContainers(upgradeFuncs, i, config, true)
|
||||
result = invokeReloadStrategy(upgradeFuncs, i, config, true)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,6 +177,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
			if err != nil {
				logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
+				return err
			} else {
				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
				logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
@@ -162,7 +185,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
			}
		}
	}
-	return err
+	return nil
}

func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
@@ -236,7 +259,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
	return nil
}

-func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
+func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
	volumes := upgradeFuncs.VolumesFunc(item)
	containers := upgradeFuncs.ContainersFunc(item)
	initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -275,10 +298,70 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
	return container
}

-func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
+		return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
+	}
+
+	return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
+}
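
For orientation (not part of the diff): the two strategies trigger a rollout through different pod-template edits. A minimal sketch of the difference, with illustrative prefix and type values, since the real constants live in internal/pkg/constants and ConvertToEnvVarName is only approximated here:

package main

import (
    "fmt"
    "strings"
)

// convertToEnvVarName approximates util.ConvertToEnvVarName (assumption):
// upper-case the resource name and replace '-' with '_'.
func convertToEnvVarName(name string) string {
    return strings.ToUpper(strings.ReplaceAll(name, "-", "_"))
}

func main() {
    // env-vars strategy: refresh a per-resource env var on the matched
    // container; changing its value edits the pod template and rolls the pods.
    fmt.Println("STAKATER_" + convertToEnvVarName("app-config") + "_CONFIGMAP")

    // annotations strategy: merge one informational annotation into the pod
    // template instead (see updatePodAnnotations below); same rollout effect,
    // but the container spec stays untouched.
    fmt.Println(`reloader.stakater.com/last-reloaded-from: {"hash":"..."}`)
}
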
+func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
+	if container == nil {
+		return constants.NoContainerFound
+	}
+
+	// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
+	// Note: the data on this struct is purely informational and is not used for future updates
+	reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
+	annotations, err := createReloadedAnnotations(&reloadSource)
+	if err != nil {
+		logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
+		return constants.NotUpdated
+	}
+
+	// Copy the all annotations to the item's annotations
+	pa := upgradeFuncs.PodAnnotationsFunc(item)
+	if pa == nil {
+		return constants.NotUpdated
+	}
+
+	for k, v := range annotations {
+		pa[k] = v
+	}
+
+	return constants.Updated
+}
+
+func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
+	if target == nil {
+		return nil, errors.New("target is required")
+	}
+
+	// Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the
+	// resource that caused the last invokeReloadStrategy.
+	// Intentionally only storing the last item in order to keep
+	// the generated annotations as small as possible.
+	annotations := make(map[string]string)
+	lastReloadedResourceName := fmt.Sprintf("%s/%s",
+		constants.ReloaderAnnotationPrefix,
+		constants.LastReloadedFromAnnotation,
+	)
+
+	lastReloadedResource, err := json.Marshal(target)
+	if err != nil {
+		return nil, err
+	}
+
+	annotations[lastReloadedResourceName] = string(lastReloadedResource)
+	return annotations, nil
+}
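
An illustrative sketch (not part of the diff) of the single annotation this produces, assuming the prefix/suffix constants resolve to reloader.stakater.com and last-reloaded-from; the struct mirrors util.ReloadSource introduced later in this changeset, and all values shown are made up:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// reloadSource mirrors util.ReloadSource from this changeset.
type reloadSource struct {
    Type          string   `json:"type"`
    Name          string   `json:"name"`
    Namespace     string   `json:"namespace"`
    Hash          string   `json:"hash"`
    ContainerRefs []string `json:"containerRefs"`
    ObservedAt    int64    `json:"observedAt"`
}

func main() {
    payload, err := json.Marshal(reloadSource{
        Type:          "CONFIGMAP", // illustrative; the real value comes from config.Type
        Name:          "app-config",
        Namespace:     "default",
        Hash:          "5f3c2a...", // SHA of the new configmap data, truncated here
        ContainerRefs: []string{"app"},
        ObservedAt:    time.Now().Unix(),
    })
    if err != nil {
        panic(err)
    }
    // The pod template ends up with exactly one annotation keyed by prefix/suffix:
    fmt.Printf("reloader.stakater.com/last-reloaded-from: %s\n", payload)
}
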
+func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
	var result constants.Result
	envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
-	container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
+	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

	if container == nil {
		return constants.NoContainerFound

(File diff suppressed because it is too large)

@@ -1,5 +1,7 @@
package options

+import "github.com/stakater/Reloader/internal/pkg/constants"
+
var (
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
	// configmaps specified by name
@@ -17,4 +19,8 @@ var (
	SearchMatchAnnotation = "reloader.stakater.com/match"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
+	// IsArgoRollouts Adds support for argo rollouts
+	IsArgoRollouts = "false"
+	// ReloadStrategy Specify the update strategy
+	ReloadStrategy = constants.EnvVarsReloadStrategy
)
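
These package-level options are bound to CLI flags in Reloader's command wiring, which sits outside this excerpt. A minimal sketch of the idea using the standard flag package; the flag names and the literal default strings here are hypothetical, not confirmed by this diff:

package main

import (
    "flag"
    "fmt"
)

// Mirrors the two options added above; "env-vars" stands in for whatever
// string constants.EnvVarsReloadStrategy actually holds (assumption).
var (
    isArgoRollouts = flag.String("is-argo-rollouts", "false", "enable Argo Rollouts support")
    reloadStrategy = flag.String("reload-strategy", "env-vars", "env-vars or annotations")
)

func main() {
    flag.Parse()
    fmt.Printf("argo rollouts: %s, strategy: %s\n", *isArgoRollouts, *reloadStrategy)
}
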
@@ -1,6 +1,9 @@
package testutil

import (
+	"context"
+	"encoding/json"
	"fmt"
	"math/rand"
	"sort"
	"strconv"
@@ -33,7 +36,7 @@ var (

// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
-	_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
+	_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
	if err != nil {
		logrus.Fatalf("Failed to create namespace for testing %v", err)
	} else {
@@ -43,7 +46,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {

// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
-	err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
+	err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
	if err != nil {
		logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
	} else {
@@ -562,8 +565,8 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
	}
}

-// GetResourceSHA returns the SHA value of given environment variable
-func GetResourceSHA(containers []v1.Container, envVar string) string {
+// GetResourceSHAFromEnvVar returns the SHA value of given environment variable
+func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
@@ -575,6 +578,28 @@ func GetResourceSHA(containers []v1.Container, envVar string) string {
	return ""
}

+// GetResourceSHAFromAnnotation returns the SHA value of given environment variable
+func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
+	lastReloadedResourceName := fmt.Sprintf("%s/%s",
+		constants.ReloaderAnnotationPrefix,
+		constants.LastReloadedFromAnnotation,
+	)
+
+	annotationJson, ok := podAnnotations[lastReloadedResourceName]
+	if !ok {
+		return ""
+	}
+
+	var last util.ReloadSource
+	bytes := []byte(annotationJson)
+	err := json.Unmarshal(bytes, &last)
+	if err != nil {
+		return ""
+	}
+
+	return last.Hash
+}
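
A sketch of how a test can assert a reload under either strategy using the renamed helpers. The fixture, the env var name, and the imports are assumptions (testutil is an internal package, imported here as if it were reachable); only the two accessor signatures come from this diff:

package testutil_test

import (
    "testing"

    appsv1 "k8s.io/api/apps/v1"

    "github.com/stakater/Reloader/internal/pkg/testutil"
)

// Sketch only: creating the deployment and computing wantSHA (e.g. via
// ConvertResourceToSHA) are elided.
func assertReloaded(t *testing.T, deployment *appsv1.Deployment, wantSHA string) {
    // The env-vars strategy records the SHA in a STAKATER_* env var...
    envSHA := testutil.GetResourceSHAFromEnvVar(
        deployment.Spec.Template.Spec.Containers,
        "STAKATER_APP_CONFIG_CONFIGMAP", // illustrative name
    )
    // ...while the annotations strategy records it in a pod-template annotation.
    annoSHA := testutil.GetResourceSHAFromAnnotation(deployment.Spec.Template.Annotations)

    if envSHA != wantSHA && annoSHA != wantSHA {
        t.Errorf("rolling upgrade did not record the new SHA")
    }
}
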
//ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
	values := []string{}
@@ -597,7 +622,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
	logrus.Infof("Creating configmap")
	configmapClient := client.CoreV1().ConfigMaps(namespace)
-	_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
+	_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return configmapClient, err
}
@@ -606,7 +631,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
	logrus.Infof("Creating secret")
	secretClient := client.CoreV1().Secrets(namespace)
-	_, err := secretClient.Create(GetSecret(namespace, secretName, data))
+	_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return secretClient, err
}
@@ -621,7 +646,7 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
	} else {
		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -636,7 +661,7 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
	} else {
		deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
	}
-	deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
+	deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
	time.Sleep(5 * time.Second)
	return deploymentConfig, err
}
@@ -651,7 +676,7 @@ func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentNa
	} else {
		deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -661,7 +686,7 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -672,7 +697,7 @@ func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentN
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -684,7 +709,7 @@ func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface,
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
	deploymentObj.Annotations = annotations
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -699,7 +724,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
	} else {
		daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
	}
-	daemonset, err := daemonsetClient.Create(daemonsetObj)
+	daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return daemonset, err
}
@@ -714,7 +739,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
	} else {
		statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
	}
-	statefulset, err := statefulsetClient.Create(statefulsetObj)
+	statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return statefulset, err
}
@@ -722,7 +747,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
// DeleteDeployment creates a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
	logrus.Infof("Deleting Deployment")
-	deploymentError := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
+	deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentError
}
@@ -730,7 +755,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
	logrus.Infof("Deleting DeploymentConfig")
-	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
+	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentConfigError
}
@@ -738,7 +763,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
// DeleteDaemonSet creates a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
	logrus.Infof("Deleting DaemonSet %s", daemonsetName)
-	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
+	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return daemonsetError
}
@@ -746,7 +771,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
// DeleteStatefulSet creates a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
	logrus.Infof("Deleting StatefulSet %s", statefulsetName)
-	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
+	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return statefulsetError
}
@@ -760,7 +785,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
	} else {
		configmap = GetConfigmap(namespace, configmapName, data)
	}
-	_, updateErr := configmapClient.Update(configmap)
+	_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}
@@ -774,7 +799,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
	} else {
		secret = GetSecret(namespace, secretName, data)
	}
-	_, updateErr := secretClient.Update(secret)
+	_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}
@@ -782,7 +807,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
	logrus.Infof("Deleting configmap %q.\n", configmapName)
-	err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
+	err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}
@@ -790,7 +815,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	logrus.Infof("Deleting secret %q.\n", secretName)
-	err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
+	err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}
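
The mechanical Create/Update/Delete rewrites above track client-go's context-aware API (introduced around client-go v0.18 / Kubernetes 1.18): every request verb gained a leading context.Context, and the options struct is now passed by value instead of by pointer. A minimal sketch of the new call shape against the in-memory fake clientset, independent of Reloader's code:

package main

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func main() {
    // fake.NewSimpleClientset gives an in-memory clientset, handy for showing
    // the call shape without a cluster.
    client := fake.NewSimpleClientset()

    // client-go >= v0.18: context first, options struct by value at the end.
    _, err := client.CoreV1().Secrets("default").Create(
        context.TODO(),
        &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "demo"}},
        metav1.CreateOptions{},
    )
    fmt.Println("create:", err)

    // The pre-0.18 form was Create(&secret) with no context, and deletes took
    // a *metav1.DeleteOptions pointer, as the '-' lines above show.
    err = client.CoreV1().Secrets("default").Delete(context.TODO(), "demo", metav1.DeleteOptions{})
    fmt.Println("delete:", err)
}

context.TODO() is a placeholder; callers that already carry a request context should pass it through so the API calls become cancellable.
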
@@ -805,8 +830,8 @@ func RandSeq(n int) string {
	return string(b)
}

-// VerifyResourceUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -835,7 +860,45 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi

		if matches {
			envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
-			updated := GetResourceSHA(containers, envName)
+			updated := GetResourceSHAFromEnvVar(containers, envName)
			if updated == config.SHAValue {
				return true
			}
		}
	}
	return false
}

+// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
+	for _, i := range items {
+		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
+		// match statefulsets with the correct annotation
+		annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
+		searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
+		reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
+		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
+		matches := false
+		if err == nil && reloaderEnabled {
+			matches = true
+		} else if annotationValue != "" {
+			values := strings.Split(annotationValue, ",")
+			for _, value := range values {
+				value = strings.Trim(value, " ")
+				if value == config.ResourceName {
+					matches = true
+					break
+				}
+			}
+		} else if searchAnnotationValue == "true" {
+			if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
+				matches = true
+			}
+		}
+
+		if matches {
+			updated := GetResourceSHAFromAnnotation(podAnnotations)
+			if updated == config.SHAValue {
+				return true
+			}
internal/pkg/util/reload_source.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package util
+
+import "time"
+
+type ReloadSource struct {
+	Type          string   `json:"type"`
+	Name          string   `json:"name"`
+	Namespace     string   `json:"namespace"`
+	Hash          string   `json:"hash"`
+	ContainerRefs []string `json:"containerRefs"`
+	ObservedAt    int64    `json:"observedAt"`
+}
+
+func NewReloadSource(
+	resourceName string,
+	resourceNamespace string,
+	resourceType string,
+	resourceHash string,
+	containerRefs []string,
+) ReloadSource {
+	return ReloadSource{
+		ObservedAt:    time.Now().Unix(),
+		Name:          resourceName,
+		Namespace:     resourceNamespace,
+		Type:          resourceType,
+		Hash:          resourceHash,
+		ContainerRefs: containerRefs,
+	}
+}
+
+func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource {
+	return NewReloadSource(
+		config.ResourceName,
+		config.Namespace,
+		config.Type,
+		config.SHAValue,
+		containerRefs,
+	)
+}
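
Usage sketch: updatePodAnnotations (earlier in this changeset) builds one of these from the watched resource's Config, and ObservedAt is stamped at construction time. The values below are illustrative, and util is imported here as if the internal package were reachable:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/stakater/Reloader/internal/pkg/util"
)

func main() {
    // Field names follow util.Config as used throughout this diff;
    // the values are made up.
    config := util.Config{
        ResourceName: "app-config",
        Namespace:    "default",
        Type:         "CONFIGMAP",
        SHAValue:     "5f3c2a...",
    }

    src := util.NewReloadSourceFromConfig(config, []string{"app"})
    out, _ := json.MarshalIndent(src, "", "  ")
    fmt.Println(string(out)) // the payload stored in the last-reloaded-from annotation
}
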
@@ -1,6 +1,7 @@
package kube

import (
+	"context"
	"os"

	"k8s.io/client-go/tools/clientcmd"
@@ -67,7 +68,7 @@ func isOpenshift() bool {
	if err != nil {
		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
	}
-	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
+	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
	if err == nil {
		logrus.Info("Environment: Openshift")
		return true