Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

## Compare commits (89 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e9843c7c7d | |
| | 1f154d0572 | |
| | 7ccb17392e | |
| | 4f551ada6e | |
| | 608a928967 | |
| | 5a14798341 | |
| | e7516e82e3 | |
| | dc3494c041 | |
| | 79e3588389 | |
| | 45a833bbb2 | |
| | 1f22ebe132 | |
| | 1846b31936 | |
| | 935a17b1c7 | |
| | 7b44a472ad | |
| | a46b56271c | |
| | 2f9dd7c422 | |
| | f373686b75 | |
| | 80557ce43e | |
| | c4f6d93eb9 | |
| | c75c787738 | |
| | ba18bbfd72 | |
| | 610b4e5716 | |
| | dc0715de61 | |
| | 4f6ff420e8 | |
| | 966d5e61c0 | |
| | d017747792 | |
| | 70099fdc8f | |
| | aaddec1103 | |
| | b5fdcd577d | |
| | 8b9bf07631 | |
| | 674444850d | |
| | e74dcc3cbd | |
| | dcae4c98ac | |
| | 94a83c5974 | |
| | 592976bf09 | |
| | ed736c8e20 | |
| | 84133742b1 | |
| | 04e19a733b | |
| | c1ae5efb7b | |
| | f630336fed | |
| | fde312edcc | |
| | 57eb4f4eaa | |
| | 1490a1feaa | |
| | 58c622eb91 | |
| | 2fd8b190b1 | |
| | 81c840ea30 | |
| | 21dbeb9810 | |
| | fba004d655 | |
| | 631781aa8a | |
| | 707dccf6b8 | |
| | 5edd29b8e9 | |
| | 27815ea3b3 | |
| | 5fd275a05c | |
| | b22694d3c2 | |
| | 5c95c6898b | |
| | 46bc4b71db | |
| | cee81b4757 | |
| | 1cec52637f | |
| | 1901a4eb49 | |
| | 710396f66e | |
| | 11bafa9f36 | |
| | 9a45318fc9 | |
| | 843f47600a | |
| | 3d9dee27b5 | |
| | 63fd3c2635 | |
| | 284ca59ca4 | |
| | 2ce24abe40 | |
| | 6419444663 | |
| | 1a6fd3e302 | |
| | 7ac90b8c88 | |
| | faf27c2d5d | |
| | 6a0dfd3ce0 | |
| | fdbc3067ce | |
| | c4ead210ee | |
| | 0441f6d481 | |
| | 09b9a073a0 | |
| | d6d188f224 | |
| | 422c291b06 | |
| | ed6ea026a8 | |
| | da30b4744b | |
| | 503e357349 | |
| | 61e9202781 | |
| | 8dbe7a85af | |
| | e86f616305 | |
| | 0c36cfd602 | |
| | f38f86a45c | |
| | 5033b8fcdc | |
| | be4285742a | |
| | 6a008999f5 | |
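Only the commit SHAs survived this capture; the same range can be inspected locally. A minimal sketch, assuming the table above is ordered newest-first (so `6a008999f5` is the oldest commit of the range and `e9843c7c7d` the newest):

```bash
# Clone the mirror and list the commits between the two endpoints of the range
git clone https://github.com/stakater/Reloader.git
cd Reloader
git log --oneline 6a008999f5^..e9843c7c7d
```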
**.github/workflows/pull_request.yaml** (vendored, new file, 134 lines)

```yaml
name: Pull Request

on:
  pull_request_target:
    branches:
      - master

env:
  DOCKER_FILE_PATH: Dockerfile
  GOLANG_VERSION: 1.15.2
  KUBERNETES_VERSION: "1.18.0"
  KIND_VERSION: "0.7.0"

jobs:
  build:
    runs-on: ubuntu-latest
    name: Build
    if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
    steps:
      - name: Check out code
        uses: actions/checkout@v2

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v1

      - name: Set up Go
        id: go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}

      - name: Install Dependencies
        run: |
          make install

      - name: Lint
        run: |
          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
          golangci-lint run --timeout=10m ./...

      - name: Helm Lint
        run: |
          cd deployments/kubernetes/chart/reloader
          helm lint

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
          sudo install ./kubectl /usr/local/bin/ && rm kubectl
          kubectl version --short --client
          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}

      - name: Install Kind
        run: |
          curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
          sudo install ./kind /usr/local/bin && rm kind
          kind version
          kind version | grep -q ${KIND_VERSION}

      - name: Create Kind Cluster
        run: |
          kind create cluster
          kubectl cluster-info

      - name: Test
        run: make test

      - name: Generate Tag
        id: generate_tag
        run: |
          sha=${{ github.event.pull_request.head.sha }}
          tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
          echo "##[set-output name=GIT_TAG;]$(echo ${tag})"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

      - name: Generate image repository path
        run: |
          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Build and Push Docker Image
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      - name: Comment on PR
        uses: mshick/add-pr-comment@v1
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
          message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
          allow-repeats: false

      - name: Notify Failure
        if: failure()
        uses: mshick/add-pr-comment@v1
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
          message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
          allow-repeats: false

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
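The `Generate Tag` step above derives a snapshot tag from the PR number and the first 8 characters of the head SHA. A rough local equivalent of the same string logic, with hypothetical values standing in for `github.event.pull_request.*`:

```bash
# Hypothetical inputs in place of the GitHub event context
pr_number=123
sha=e9843c7c7d0000000000000000000000000000ff

# Same substring expansion the workflow uses
tag="SNAPSHOT-PR-${pr_number}-${sha:0:8}"
echo "$tag"   # -> SNAPSHOT-PR-123-e9843c7c
```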
**.github/workflows/push.yaml** (vendored, new file, 175 lines)

```yaml
name: Push

on:
  push:
    branches:
      - master

env:
  DOCKER_FILE_PATH: Dockerfile
  GOLANG_VERSION: 1.15.2
  KUBERNETES_VERSION: "1.18.0"
  KIND_VERSION: "0.7.0"
  HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"

jobs:
  build:
    name: Build
    if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v1

      - name: Set up Go
        id: go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}

      - name: Install Dependencies
        run: |
          make install

      - name: Lint
        run: |
          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
          golangci-lint run --timeout=10m ./...

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
          sudo install ./kubectl /usr/local/bin/ && rm kubectl
          kubectl version --short --client
          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}

      - name: Install Kind
        run: |
          curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
          sudo install ./kind /usr/local/bin && rm kind
          kind version
          kind version | grep -q ${KIND_VERSION}

      - name: Create Kind Cluster
        run: |
          kind create cluster
          kubectl cluster-info

      - name: Test
        run: make test

      - name: Generate Tag
        id: generate_tag
        uses: anothrNick/github-tag-action@1.26.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
          DEFAULT_BUMP: patch
          DRY_RUN: true

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

      - name: Generate image repository path
        run: |
          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      ##############################
      ## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
      ##############################

      # Generate tag for operator without "v"
      - name: Generate Operator Tag
        id: generate_operator_tag
        uses: anothrNick/github-tag-action@1.26.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: false
          DEFAULT_BUMP: patch
          DRY_RUN: true

      # Update chart tag to the latest semver tag
      - name: Update Chart Version
        env:
          VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
        run: make bump-chart

      # Publish helm chart
      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@master
        with:
          branch: master
          repository: stakater-charts
          target_dir: docs
          token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          charts_dir: deployments/kubernetes/chart/
          charts_url: ${{ env.HELM_REGISTRY_URL }}
          owner: stakater
          linting: on
          commit_username: stakater-user
          commit_email: stakater@gmail.com

      # Commit back changes
      - name: Commit files
        run: |
          git config --local user.email "stakater@gmail.com"
          git config --local user.name "stakater-user"
          git status
          git add .
          git commit -m "[skip-ci] Update artifacts" -a

      - name: Push changes
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          branch: ${{ github.ref }}

      - name: Push Latest Tag
        uses: anothrNick/github-tag-action@1.26.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
          DEFAULT_BUMP: patch

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
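The two `github-tag-action` invocations run with `DRY_RUN: true`, so they only compute the next semver tag (with and without the `v` prefix); the final `Push Latest Tag` step is what actually creates it. A rough sketch of the same patch-bump logic in plain git, assuming the latest `v`-prefixed tag is reachable from HEAD:

```bash
# Compute a patch-bumped version from the most recent v-prefixed tag
latest=$(git describe --tags --abbrev=0 --match 'v*')   # e.g. v0.0.81
base=${latest#v}
IFS=. read -r major minor patch <<< "$base"
next="v${major}.${minor}.$((patch + 1))"                # e.g. v0.0.82
echo "$next"
```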
**.github/workflows/release.yaml** (vendored, new file, 44 lines)

```yaml
name: Release Go project

on:
  push:
    tags:
      - "v*"

env:
  GOLANG_VERSION: 1.15.2

jobs:
  build:
    name: GoReleaser build
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # See: https://goreleaser.com/ci/actions/

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}
        id: go

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@master
        with:
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always()
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
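GoReleaser can be exercised locally without publishing anything; a sketch, assuming a `.goreleaser.yml` exists at the repository root and the flag names match GoReleaser versions current at the time of this workflow:

```bash
# Build a local snapshot release without pushing to GitHub
goreleaser release --snapshot --skip-publish --rm-dist
ls dist/
```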
**Dockerfile** (new file, 31 lines)

```dockerfile
# Build the manager binary
FROM golang:1.15.2 as builder

WORKDIR /workspace

# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY internal/ internal/
COPY pkg/ pkg/

# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=mod -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot

# Port for metrics and probes
EXPOSE 9090

ENTRYPOINT ["/manager"]
```
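Since this is a standard multi-stage build, the image can be built and smoke-tested locally; a sketch, assuming Docker (with Buildx for the multi-arch variant) is available — `reloader:dev` is an arbitrary local tag:

```bash
# Single-arch local build of the manager image
docker build -t reloader:dev .

# Or a multi-arch build matching the platforms used in CI
docker buildx build --platform linux/amd64,linux/arm,linux/arm64 -t reloader:dev .
```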
**Jenkinsfile** (vendored, deleted file, 8 lines)

```groovy
#!/usr/bin/groovy
@Library('github.com/stakater/stakater-pipeline-library@v2.16.24') _

goBuildViaGoReleaser {
    publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
    publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
    toolsImage = 'stakater/pipeline-tools:v2.0.18'
}
```
**Makefile** (66 lines changed)

```diff
@@ -1,15 +1,20 @@
 # note: call scripts from /scripts

-.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy
+.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image

-BUILDER ?= reloader-builder
+OS ?= linux
+ARCH ?= ???
+ALL_ARCH ?= arm64 arm amd64
+
+BUILDER ?= reloader-builder-${ARCH}
 BINARY ?= Reloader
 DOCKER_IMAGE ?= stakater/reloader
 # Default value "dev"
 DOCKER_TAG ?= 1.0.0
 REPOSITORY = ${DOCKER_IMAGE}:${DOCKER_TAG}
+TAG ?= v0.0.75.0
+REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
+REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}

-VERSION=$(shell cat .version)
+VERSION ?= 0.0.1
 BUILD=

 GOCMD = go
@@ -25,10 +30,35 @@ build:
 	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"

 builder-image:
-	@docker build --network host -t "${BUILDER}" -f build/package/Dockerfile.build .
+	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
+
+reloader-${ARCH}.tar:
+	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
+	docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar

 binary-image: builder-image
-	@docker run --network host --rm "${BUILDER}" | docker build --network host -t "${REPOSITORY}" -f Dockerfile.run -
+	cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
+
+push:
+	docker push ${REPOSITORY_ARCH}
+
+release: binary-image push manifest
+
+release-all:
+	-rm -rf ~/.docker/manifests/*
+	# Make arch-specific release
+	@for arch in $(ALL_ARCH) ; do \
+		echo Make release: $$arch ; \
+		make release ARCH=$$arch ; \
+	done
+
+	set -e
+	docker manifest push --purge $(REPOSITORY_GENERIC)
+
+manifest:
+	set -e
+	docker manifest create -a $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
+	docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)

 test:
 	"$(GOCMD)" test -timeout 1800s -v ./...
@@ -37,15 +67,29 @@ stop:
 	@docker stop "${BINARY}"

 clean-images: stop
-	@docker rmi "${BUILDER}" "${BINARY}"
+	-docker rmi "${BINARY}"
+	@for arch in $(ALL_ARCH) ; do \
+		echo Clean image: $$arch ; \
+		make clean-image ARCH=$$arch ; \
+	done
+	-docker rmi "${REPOSITORY_GENERIC}"
+
+clean-image:
+	-docker rmi "${BUILDER}"
+	-docker rmi "${REPOSITORY_ARCH}"
+	-rm -rf ~/.docker/manifests/*

 clean:
 	"$(GOCMD)" clean -i

-push: ## push the latest Docker image to DockerHub
-	docker push $(REPOSITORY)
+	-rm -rf reloader-*.tar

 apply:
 	kubectl apply -f deployments/manifests/ -n temp-reloader

 deploy: binary-image push apply

+# Bump Chart
+bump-chart:
+	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
+	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
+	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
```
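With the new targets, per-architecture images and the combined manifest list are produced from a single entry point; a sketch of typical invocations, assuming Docker Buildx and QEMU emulation are already set up (the tag value is illustrative):

```bash
# Build, push and assemble the manifest for a single architecture
make release ARCH=amd64 TAG=v0.0.75.0

# Or iterate over arm64, arm and amd64 and push the generic manifest
make release-all TAG=v0.0.75.0
```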
**README.md** (17 lines changed)

````diff
@@ -191,20 +191,20 @@ namespace: reloader

 ### Helm Charts

-Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands
+Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating reloader from Helm2 to Helm3

 ```bash
 helm repo add stakater https://stakater.github.io/stakater-charts

 helm repo update

-helm install stakater/reloader
+helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
 ```

 **Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in `test` namespace.

 ```bash
-helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
+helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
 ```

 Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:
@@ -218,6 +218,8 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by

 You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart

+You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
+
 ## Help

 ### Documentation
@@ -232,8 +234,8 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us a

 Join and talk to us on Slack for discussing Reloader

-[](https://stakater-slack.herokuapp.com/)
-[](https://stakater.slack.com/messages/CC5S05S12)
+[](https://slack.stakater.com/)
+[](https://stakater-community.slack.com/messages/CC5S05S12)

 ## Contributing

@@ -243,6 +245,11 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r

 ### Developing

+1. Deploy Reloader.
+2. Run `okteto up` to activate your development container.
+3. `make build`.
+4. `./Reloader`
+
 PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.

 1. **Fork** the repo on GitHub
````
```diff
@@ -1,6 +1,8 @@
-FROM golang:1.13.9-alpine
+FROM golang:1.15.2-alpine
 LABEL maintainer "Stakater Team"

+ARG GOARCH=amd64
+
 RUN apk -v --update \
     --no-cache \
     add git build-base
@@ -13,7 +15,7 @@ RUN go mod download

 COPY . .

-ENV CGO_ENABLED=0 GOOS=linux GOARCH=amd64
+ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH

 RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
```
```diff
@@ -8,4 +8,7 @@ COPY Reloader /bin/Reloader
 # On alpine 'nobody' has uid 65534
 USER 65534

+# Port for metrics and probes
+EXPOSE 9090
+
 ENTRYPOINT ["/bin/Reloader"]
```
```diff
@@ -3,27 +3,29 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: v0.0.65
-appVersion: v0.0.65
+version: v0.0.82
+appVersion: v0.0.82
 keywords:
 - Reloader
 - kubernetes
 home: https://github.com/stakater/Reloader
 sources:
-- https://github.com/stakater/IngressMonitorController
+  - https://github.com/stakater/IngressMonitorController
 icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
 maintainers:
-- name: Stakater
-  email: hello@stakater.com
-- name: rasheedamir
-  email: rasheed@aurorasolutions.io
-- name: waseem-h
-  email: waseemhassan@stakater.com
-- name: faizanahmad055
-  email: faizan.ahmad55@outlook.com
-- name: kahootali
-  email: ali.kahoot@aurorasolutions.io
-- name: ahmadiq
-  email: ahmad@aurorasolutions.io
-- name: ahsan-storm
-  email: ahsanmuhammad1@outlook.com
+  - name: Stakater
+    email: hello@stakater.com
+  - name: rasheedamir
+    email: rasheed@aurorasolutions.io
+  - name: waseem-h
+    email: waseemhassan@stakater.com
+  - name: faizanahmad055
+    email: faizan.ahmad55@outlook.com
+  - name: kahootali
+    email: ali.kahoot@aurorasolutions.io
+  - name: ahmadiq
+    email: ahmad@aurorasolutions.io
+  - name: ahsan-storm
+    email: ahsanmuhammad1@outlook.com
+  - name: ahmedwaleedmalik
+    email: waleed@stakater.com
```
```diff
@@ -5,6 +5,7 @@ approvers:
 - waseem-h
 - rasheedamir
 - ahsan-storm
+- ahmedwaleedmalik
 reviewers:
 - faizanahmad055
 - kahootali
@@ -12,3 +13,4 @@ reviewers:
 - waseem-h
 - rasheedamir
 - ahsan-storm
+- ahmedwaleedmalik
```
```diff
@@ -12,15 +12,20 @@ Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
 */}}
 {{- define "reloader-fullname" -}}
 {{- if .Values.fullnameOverride -}}
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
 {{- $name := default .Chart.Name .Values.nameOverride -}}
 {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
 {{- end -}}
 {{- end -}}

 {{- define "reloader-labels.chart" -}}
 app: {{ template "reloader-fullname" . }}
 chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
 release: {{ .Release.Name | quote }}
 heritage: {{ .Release.Service | quote }}
+app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
 {{- end -}}

 {{/*
@@ -33,3 +38,11 @@ Create the name of the service account to use
 {{ default "default" .Values.reloader.serviceAccount.name }}
 {{- end -}}
 {{- end -}}
+
+{{/*
+Create the annotations to support helm3
+*/}}
+{{- define "reloader-helm3.annotations" -}}
+meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
+meta.helm.sh/release-name: {{ .Release.Name | quote }}
+{{- end -}}
```
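The new `reloader-helm3.annotations` helper stamps each chart resource with the release name and namespace that Helm 3 expects on adopted objects. A quick way to inspect the rendered result; a sketch, assuming the chart path from this repository and Helm 3 installed locally:

```bash
# Render the chart and show the helm3 ownership annotations it now emits
helm template reloader deployments/kubernetes/chart/reloader --namespace demo \
  | grep -B1 -A2 'meta.helm.sh/release-name'
```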
```diff
@@ -1,7 +1,13 @@
 {{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
 apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: ClusterRole
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
```
```diff
@@ -1,7 +1,13 @@
 {{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
 apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: ClusterRoleBinding
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
```
```diff
@@ -1,8 +1,9 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-{{- if .Values.reloader.deployment.annotations }}
   annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+{{- if .Values.reloader.deployment.annotations }}
 {{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
 {{- end }}
   labels:
@@ -88,12 +89,25 @@ spec:
               fieldPath: metadata.namespace
 {{- end }}
 {{- end }}

+        ports:
+        - name: http
+          containerPort: 9090
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+        readinessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+
 {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
         volumeMounts:
           - mountPath: /tmp/
             name: tmp-volume
 {{- end }}
-{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (eq .Values.reloader.ignoreConfigMaps true) (.Values.reloader.custom_annotations) }}
+{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
         args:
 {{- if .Values.reloader.logFormat }}
           - "--log-format={{ .Values.reloader.logFormat }}"
@@ -101,22 +115,33 @@ spec:
 {{- if .Values.reloader.ignoreSecrets }}
           - "--resources-to-ignore=secrets"
 {{- end }}
-{{- if eq .Values.reloader.ignoreConfigMaps true }}
+{{- if .Values.reloader.ignoreConfigMaps }}
           - "--resources-to-ignore=configMaps"
 {{- end }}
+{{- if .Values.reloader.ignoreNamespaces }}
+          - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
+{{- end }}

 {{- if .Values.reloader.custom_annotations }}
 {{- if .Values.reloader.custom_annotations.configmap }}
-          - "--configmap-annotation"
-          - "{{ .Values.reloader.custom_annotations.configmap }}"
+          - "--configmap-annotation"
+          - "{{ .Values.reloader.custom_annotations.configmap }}"
 {{- end }}
 {{- if .Values.reloader.custom_annotations.secret }}
-          - "--secret-annotation"
-          - "{{ .Values.reloader.custom_annotations.secret }}"
+          - "--secret-annotation"
+          - "{{ .Values.reloader.custom_annotations.secret }}"
 {{- end }}
 {{- if .Values.reloader.custom_annotations.auto }}
-          - "--auto-annotation"
-          - "{{ .Values.reloader.custom_annotations.auto }}"
+          - "--auto-annotation"
+          - "{{ .Values.reloader.custom_annotations.auto }}"
 {{- end }}
+{{- if .Values.reloader.custom_annotations.search }}
+          - "--auto-search-annotation"
+          - "{{ .Values.reloader.custom_annotations.search }}"
+{{- end }}
+{{- if .Values.reloader.custom_annotations.match }}
+          - "--search-match-annotation"
+          - "{{ .Values.reloader.custom_annotations.match }}"
+{{- end }}
 {{- end }}
 {{- end }}
```
```diff
@@ -1,7 +1,13 @@
 {{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
 apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: Role
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
```
```diff
@@ -1,8 +1,14 @@
 {{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
 apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: RoleBinding
 metadata:
-  labels:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+  labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
 {{ toYaml .Values.reloader.rbac.labels | indent 4 }}
```
```diff
@@ -2,8 +2,9 @@
 apiVersion: v1
 kind: Service
 metadata:
-{{- if .Values.reloader.service.annotations }}
   annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+{{- if .Values.reloader.service.annotations }}
 {{ toYaml .Values.reloader.service.annotations | indent 4 }}
 {{- end }}
   labels:
@@ -21,5 +22,8 @@ spec:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   ports:
-{{ toYaml .Values.reloader.service.ports | indent 4 }}
-{{- end }}
+  - port: {{ .Values.reloader.service.port }}
+    name: http
+    protocol: TCP
+    targetPort: http
 {{- end }}
```
```diff
@@ -5,6 +5,11 @@ kind: ServiceAccount
 imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
 {{- end }}
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+{{- if .Values.reloader.serviceAccount.annotations }}
+{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
+{{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.serviceAccount.labels }}
```
```diff
@@ -0,0 +1,31 @@
+{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.serviceMonitor.enabled ) }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.serviceMonitor.labels }}
+{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
+{{- end }}
+  name: {{ template "reloader-fullname" . }}
+{{- if .Values.reloader.serviceMonitor.namespace }}
+  namespace: {{ .Values.reloader.serviceMonitor.namespace }}
+{{- end }}
+spec:
+  endpoints:
+  - targetPort: http
+    path: "/metrics"
+{{- if .Values.reloader.serviceMonitor.interval }}
+    interval: {{ .Values.reloader.serviceMonitor.interval }}
+{{- end }}
+{{- if .Values.reloader.serviceMonitor.timeout }}
+    scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
+{{- end }}
+  jobLabel: {{ template "reloader-fullname" . }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+{{ include "reloader-labels.chart" . | nindent 6 }}
+{{- end }}
```
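This template only renders when the Prometheus Operator CRDs are present in the cluster and `serviceMonitor.enabled` is set. A minimal sketch of turning it on at install time (release name and namespace are illustrative):

```bash
# Install with metrics scraping enabled; requires the monitoring.coreos.com CRDs
helm install reloader stakater/reloader \
  --namespace monitoring \
  --set reloader.serviceMonitor.enabled=true
```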
```diff
@@ -12,10 +12,13 @@ reloader:
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
+  ignoreNamespaces: "" # Comma separated list of namespaces to ignore
   logFormat: "" #json
   watchGlobally: true
   # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
   readOnlyRootFileSystem: false
+  legacy:
+    rbac: false
   matchLabels: {}
   deployment:
     nodeSelector:
@@ -48,10 +51,10 @@ reloader:
     labels:
       provider: stakater
      group: com.stakater.platform
-      version: v0.0.65
+      version: v0.0.77
     image:
       name: stakater/reloader
-      tag: "v0.0.65"
+      tag: v0.0.82
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -78,11 +81,7 @@ reloader:
   service: {}
     # labels: {}
     # annotations: {}
-    # ports:
-    #   - port: 9090
-    #     name: http
-    #     protocol: TCP
-    #     targetPort: 9090
+    # port: 9090

   rbac:
     enabled: true
@@ -92,6 +91,7 @@ reloader:
     # Specifies whether a ServiceAccount should be created
     create: true
     labels: {}
+    annotations: {}
     # The name of the ServiceAccount to use.
     # If not set and create is true, a name is generated using the fullname template
     name:
@@ -101,3 +101,15 @@ reloader:
     # configmap: "my.company.com/configmap"
     # secret: "my.company.com/secret"
   custom_annotations: {}
+  serviceMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the ServiceMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
```
```diff
@@ -4,11 +4,15 @@
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader-role
   namespace: default
 rules:
```
```diff
@@ -4,11 +4,15 @@
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
```
```diff
@@ -3,14 +3,18 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.65
+    version: v0.0.77
   name: reloader-reloader
 spec:
@@ -24,18 +28,31 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.65"
+        chart: "reloader-v0.0.77"
         release: "reloader"
         heritage: "Tiller"
+        app.kubernetes.io/managed-by: "Tiller"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.65
+        version: v0.0.77
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.65"
+      - image: "stakater/reloader:v0.0.77"
         imagePullPolicy: IfNotPresent
         name: reloader-reloader
+        ports:
+        - name: http
+          containerPort: 9090
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+        readinessProbe:
+          httpGet:
+            path: /metrics
+            port: http
         securityContext:
           runAsNonRoot: true
           runAsUser: 65534
```
```diff
@@ -1,3 +1,4 @@
+---
 # Source: reloader/templates/service.yaml

```
```diff
@@ -4,10 +4,14 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader
```
**deployments/kubernetes/manifests/servicemonitor.yaml** (new file, 4 lines)

```diff
@@ -0,0 +1,4 @@
+---
+# Source: reloader/templates/servicemonitor.yaml
+
+
```
```diff
@@ -4,11 +4,15 @@
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader-role
   namespace: default
 rules:
@@ -49,11 +53,15 @@ rules:
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
@@ -70,14 +78,18 @@ subjects:
 apiVersion: apps/v1
 kind: Deployment
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.65
+    version: v0.0.77
   name: reloader-reloader
 spec:
@@ -91,18 +103,31 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.65"
+        chart: "reloader-v0.0.77"
         release: "reloader"
         heritage: "Tiller"
+        app.kubernetes.io/managed-by: "Tiller"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.65
+        version: v0.0.77
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.65"
+      - image: "stakater/reloader:v0.0.77"
         imagePullPolicy: IfNotPresent
         name: reloader-reloader
+        ports:
+        - name: http
+          containerPort: 9090
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+        readinessProbe:
+          httpGet:
+            path: /metrics
+            port: http
         securityContext:
           runAsNonRoot: true
           runAsUser: 65534
@@ -120,16 +145,25 @@ spec:
 ---
 # Source: reloader/templates/service.yaml

+---
+# Source: reloader/templates/serviceaccount.yaml
+
 apiVersion: v1
 kind: ServiceAccount
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.65"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader

+---
+# Source: reloader/templates/servicemonitor.yaml
```
```diff
@@ -12,10 +12,13 @@ reloader:
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
+  ignoreNamespaces: "" # Comma separated list of namespaces to ignore
   logFormat: "" #json
   watchGlobally: true
   # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
   readOnlyRootFileSystem: false
+  legacy:
+    rbac: false
   matchLabels: {}
   deployment:
     nodeSelector:
@@ -78,11 +81,7 @@ reloader:
   service: {}
     # labels: {}
     # annotations: {}
-    # ports:
-    #   - port: 9090
-    #     name: http
-    #     protocol: TCP
-    #     targetPort: 9090
+    # port: 9090

   rbac:
     enabled: true
@@ -92,6 +91,7 @@ reloader:
     # Specifies whether a ServiceAccount should be created
     create: true
     labels: {}
+    annotations: {}
     # The name of the ServiceAccount to use.
     # If not set and create is true, a name is generated using the fullname template
     name:
@@ -101,3 +101,15 @@ reloader:
     # configmap: "my.company.com/configmap"
     # secret: "my.company.com/secret"
   custom_annotations: {}
+  serviceMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the ServiceMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
```
**docs/Container Build.md** (new file, 41 lines)

````markdown
# Container Build
> **WARNING:** As a user of Reloader there is no need to build containers, these are freely available here: https://hub.docker.com/r/stakater/reloader/

Multi-architecture approach is based on original work by @mdh02038: https://github.com/mdh02038/Reloader

Images tested on linux/arm, linux/arm64 and linux/amd64.

# Install Pre-Reqs
The build environment requires the following packages (tested on Ubuntu 20.04):
* golang
* make
* qemu (for arm, arm64 etc. emulation)
* binfmt-support
* Docker engine

## Docker
Follow instructions here: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository

Once installed, enable the experimental CLI:
```
export DOCKER_CLI_EXPERIMENTAL=enabled
```
Login, to enable publishing of packages:
```
sudo docker login
```
## Remaining Pre-Reqs
Remaining Pre-Reqs can be installed via:
```
sudo apt install golang make qemu-user-static binfmt-support -y
```

# Publish Multi-Architecture Image
To build/ publish multi-arch Docker images clone repository and execute from repository root:
```
sudo make release-all
```

# Additional Links/ Info
* https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408
````
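After `make release-all`, the pushed manifest list can be checked from any machine; a sketch, assuming the experimental Docker CLI is enabled as described in the document above (the tag is illustrative):

```bash
# List the per-architecture entries behind the multi-arch tag
docker manifest inspect stakater/reloader:v0.0.75.0 \
  | grep -E '"architecture"|"os"'
```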
**docs/Helm2-to-Helm3.md** (new file, 62 lines)

````markdown
# Helm2 to Helm3 Migration

Follow below mentioned instructions to migrate reloader from Helm2 to Helm3

## Instructions:

There are 3 steps involved in migrating the reloader from Helm2 to Helm3.

### Step 1:
Install the helm-2to3 plugin

```bash
helm3 plugin install https://github.com/helm/helm-2to3

helm3 2to3 convert <release-name>

helm3 2to3 cleanup --release-cleanup --skip-confirmation
```

### Step 2:
Add the following Helm3 labels and annotations on reloader resources.

Label:

```yaml
app.kubernetes.io/managed-by=Helm
```
Annotations:
```yaml
meta.helm.sh/release-name=<release-name>
meta.helm.sh/release-namespace=<namespace>
```

For example, to label and annotate the ClusterRoleBinding and ClusterRole:

```bash
KIND=ClusterRoleBinding
NAME=reloader-reloader-role-binding
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm

KIND=ClusterRole
NAME=reloader-reloader-role
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
```

### Step 3:
Upgrade to desired version
```bash
helm3 repo add stakater https://stakater.github.io/stakater-charts

helm3 repo update

helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
```
````
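Once converted, the release should show up under Helm 3 and be gone from Helm 2; a short verification sketch, assuming the Helm 2 binary is still available as `helm` alongside `helm3`:

```bash
helm3 ls --all-namespaces   # the reloader release should be listed here
helm ls                     # and no longer here after the cleanup step
```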
```diff
@@ -8,5 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
 | Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
 | Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
 | Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. It add difficulties for any additional updates in configmap controller and one can not know for sure whether new changes breaks any old functionality or not. |
-| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
-| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
+| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
+| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
```
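The comparison above notes that Reloader records a SHA1 of the changed resource in a `STAKATER_FOO_CONFIGMAP`/`STAKATER_FOO_SECRET` environment variable. A rough illustration of the idea only — this is not Reloader's exact hashing code, which operates on the resource data rather than a CLI dump, and `foo`/`demo` are hypothetical names:

```bash
# Hash a configmap's current data; a changed hash signals changed content
kubectl get configmap foo -n demo -o jsonpath='{.data}' | sha1sum
```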
```diff
@@ -2,6 +2,6 @@

 These are the key features of Reloader:

-1. Restart pod in a depoloyment on change in linked/related configmap's or secret's
+1. Restart pod in a deployment on change in linked/related configmap's or secret's
 2. Restart pod in a daemonset on change in linked/related configmap's or secret's
 3. Restart pod in a statefulset on change in linked/related configmap's or secret's
```
**go.mod** (4 lines changed)

```diff
@@ -1,6 +1,6 @@
 module github.com/stakater/Reloader

-go 1.13
+go 1.15

 require (
 	github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
@@ -12,8 +12,6 @@ require (
 	github.com/prometheus/client_golang v1.4.1
 	github.com/sirupsen/logrus v1.4.2
 	github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
-	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
-	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
 	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
 	k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
 	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
```
**go.sum** (7 lines changed)

```diff
@@ -161,8 +161,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -239,6 +237,7 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
@@ -249,8 +248,6 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -258,8 +255,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
 gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
 gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
```
```diff
@@ -57,12 +57,8 @@ func NewController(

 // Add function to add a new object to the queue in case of creating a resource
 func (c *Controller) Add(obj interface{}) {
-	if !c.resourceInIgnoredNamespace(obj) {
-		c.queue.Add(handler.ResourceCreatedHandler{
-			Resource:   obj,
-			Collectors: c.collectors,
-		})
-	}
+	// Not required as reloader should update the resource in the event of any change and not in the event of any resource creation.
+	// This causes the issue where reloader reloads the pods when reloader itself gets restarted as it's queue is filled with all the k8s objects as new resources.
 }

 func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
```
@@ -1,11 +1,12 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/handler"
|
||||
@@ -29,6 +30,10 @@ var (
|
||||
collectors = metrics.NewCollectors()
|
||||
)
|
||||
|
||||
const (
|
||||
sleepDuration = 3 * time.Second
|
||||
)

func TestMain(m *testing.M) {

	testutil.CreateNamespace(namespace, clients.KubernetesClient)
@@ -45,7 +50,7 @@ func TestMain(m *testing.M) {
		defer close(stop)
		go c.Run(1, stop)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	logrus.Infof("Running Testcases")
	retCode := m.Run()
@@ -95,7 +100,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
	if !updated {
		t.Errorf("DeploymentConfig was not updated")
	}
	time.Sleep(5 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
@@ -108,7 +113,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -147,7 +152,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -160,7 +165,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -199,7 +204,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -212,12 +217,15 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon creating the configmap
func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {

	// TODO: Fix this test case
	t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")

	// Creating configmap
	configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5)
	_, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -237,14 +245,14 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com")
	if err != nil {
		t.Errorf("Error while creating the configmap second time %v", err)
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
@@ -260,7 +268,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -273,7 +281,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and update env var upon updating the configmap
@@ -319,7 +327,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -332,11 +340,11 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Do not perform rolling upgrade on deployment and do not create env var upon updating the configmap labels
func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
	// Creating configmap
	configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
	configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -370,7 +378,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment
	if updated {
		t.Errorf("Deployment should not be updated by changing label")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -383,11 +391,15 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create an env var upon creating the secret
func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {

	// TODO: Fix this test case
	t.Skip("Skipping TestControllerCreatingSecretShouldCreateEnvInDeployment test case")

	// Creating secret
	secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -406,14 +418,14 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData)
	if err != nil {
		t.Errorf("Error in secret creation: %v", err)
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
@@ -425,7 +437,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
		Annotation: options.SecretUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
@@ -442,7 +454,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create an env var upon updating the secret
@@ -492,7 +504,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and update env var upon updating the secret
@@ -548,11 +560,11 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Do not perform rolling upgrade on pod and do not create or update an env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
	// Creating secret
	secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
	secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -597,7 +609,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap
@@ -635,7 +647,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting DaemonSet
	err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -648,7 +660,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap
@@ -672,7 +684,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
		t.Errorf("Configmap was not updated")
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Updating configmap for second time
	updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io")
@@ -680,7 +692,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
		t.Errorf("Configmap was not updated")
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Verifying DaemonSet update
	logrus.Infof("Verifying env var has been updated")
@@ -696,7 +708,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting DaemonSet
	err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -709,7 +721,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create an env var upon updating the secret
@@ -759,7 +771,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and update env var upon updating the secret
@@ -782,7 +794,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		t.Errorf("Error while updating secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Updating Secret
	err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData)
@@ -816,11 +828,11 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Do not perform rolling upgrade on pod and do not create or update an env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) {
	// Creating secret
	secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
	secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -865,7 +877,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap
@@ -903,7 +915,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting StatefulSet
	err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -916,7 +928,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap
@@ -960,7 +972,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting StatefulSet
	err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -973,7 +985,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create an env var upon updating the secret
@@ -1023,7 +1035,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on StatefulSet and update env var upon updating the secret
@@ -1079,7 +1091,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

func TestController_resourceInIgnoredNamespace(t *testing.T) {

@@ -33,7 +33,7 @@ func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
	var oldSHAData string
	var config util.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
		oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)

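This hunk is the consumer side of the hashing change further down: GetSHAfromConfigmap now takes the whole ConfigMap instead of just its Data map, so the old-object hash is computed over the same fields (including BinaryData) as the new one. Reduced to its essence, the handler's decision is a hash comparison; a hypothetical helper expressing that idea (not the literal Handle flow):

    package handler

    import (
        "github.com/stakater/Reloader/internal/pkg/util"
        v1 "k8s.io/api/core/v1"
    )

    // shouldReload distills what GetConfig enables: reload only when the
    // content hash differs between the old and the new object.
    func shouldReload(oldCM, newCM *v1.ConfigMap) bool {
        return util.GetSHAfromConfigmap(oldCM) != util.GetSHAfromConfigmap(newCM)
    }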
@@ -116,6 +116,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
	if result != constants.Updated && annotationValue != "" {
		values := strings.Split(annotationValue, ",")
		for _, value := range values {
			value = strings.Trim(value, " ")
			if value == config.ResourceName {
				result = updateContainers(upgradeFuncs, i, config, false)
				if result == constants.Updated {
@@ -260,7 +261,7 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter

func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
	var result constants.Result
	envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
	envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
	container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)

	if container == nil {
@@ -268,12 +269,12 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
	}

	// update if env var exists
	result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
	result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)

	// if no env var exists yet, let's create one
	if result == constants.NoEnvVarFound {
		e := v1.EnvVar{
			Name:  envar,
			Name:  envVar,
			Value: config.SHAValue,
		}
		container.Env = append(container.Env, e)
@@ -282,11 +283,11 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
	return result
}

func updateEnvVar(containers []v1.Container, envar string, shaData string) constants.Result {
func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
			if envs[j].Name == envar {
			if envs[j].Name == envVar {
				if envs[j].Value != shaData {
					envs[j].Value = shaData
					return constants.Updated

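The envar-to-envVar rename is cosmetic, but the surrounding logic is Reloader's core trick: a Deployment, DaemonSet, or StatefulSet only rolls its pods when the pod template changes, so upserting a prefixed env var whose value is the resource's content SHA is a minimal template mutation that Kubernetes itself turns into a rolling upgrade. A self-contained distillation of updateEnvVar plus the append branch above (hedged; the real code routes through callbacks and the constants package):

    package handler

    import v1 "k8s.io/api/core/v1"

    // setReloadEnvVar upserts one env var carrying the content SHA.
    // Changing its value changes the pod template, which triggers a rollout.
    func setReloadEnvVar(container *v1.Container, name, sha string) {
        for i := range container.Env {
            if container.Env[i].Name == name {
                container.Env[i].Value = sha
                return
            }
        }
        container.Env = append(container.Env, v1.EnvVar{Name: name, Value: sha})
    }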
@@ -15,7 +15,6 @@ import (
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	core_v1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	testclient "k8s.io/client-go/kubernetes/fake"
)
@@ -657,11 +656,12 @@ func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume(t *testing.T)
	}
}

func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
	configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
	configmapObj.Annotations = annotations
	return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
}
// Unused function
// func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
// 	configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
// 	configmapObj.Annotations = annotations
// 	return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
// }

func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
@@ -720,7 +720,10 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t
	if err != nil {
		t.Errorf("Failed to create deployment with search annotation.")
	}
	defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
	defer func() {
		_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
	}()
	// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
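The rewritten defer changes linting, not behavior: Delete returns an error, and a bare deferred call silently discards it, which errcheck (run via golangci-lint in this repository's CI) flags. Wrapping the call in a closure makes the discard explicit; logging instead of discarding is a natural variant of the same pattern:

    // Variant of the cleanup above that logs instead of discarding the error.
    defer func() {
        if err := clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}); err != nil {
            logrus.Errorf("Failed to delete deployment %s: %v", deployment.Name, err)
        }
    }()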

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")

@@ -563,11 +563,11 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}

// GetResourceSHA returns the SHA value of the given environment variable
func GetResourceSHA(containers []v1.Container, envar string) string {
func GetResourceSHA(containers []v1.Container, envVar string) string {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
			if envs[j].Name == envar {
			if envs[j].Name == envVar {
				return envs[j].Value
			}
		}
@@ -821,6 +821,7 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
	} else if annotationValue != "" {
		values := strings.Split(annotationValue, ",")
		for _, value := range values {
			value = strings.Trim(value, " ")
			if value == config.ResourceName {
				matches = true
				break

@@ -23,7 +23,7 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
		ResourceName:        configmap.Name,
		ResourceAnnotations: configmap.Annotations,
		Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
		SHAValue:            GetSHAfromConfigmap(configmap.Data),
		SHAValue:            GetSHAfromConfigmap(configmap),
		Type:                constants.ConfigmapEnvVarPostfix,
	}
}

@@ -2,10 +2,12 @@ package util

import (
	"bytes"
	"encoding/base64"
	"sort"
	"strings"

	"github.com/stakater/Reloader/internal/pkg/crypto"
	v1 "k8s.io/api/core/v1"
)

// ConvertToEnvVarName converts the given text into a usable env var
@@ -29,11 +31,14 @@ func ConvertToEnvVarName(text string) string {
	return buffer.String()
}

func GetSHAfromConfigmap(data map[string]string) string {
func GetSHAfromConfigmap(configmap *v1.ConfigMap) string {
	values := []string{}
	for k, v := range data {
	for k, v := range configmap.Data {
		values = append(values, k+"="+v)
	}
	for k, v := range configmap.BinaryData {
		values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
	}
	sort.Strings(values)
	return crypto.GenerateSHA(strings.Join(values, ";"))
}

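The practical effect of the new signature: two ConfigMaps that differ only in BinaryData used to hash identically, because the old Data-only function never saw the binary keys, so Reloader missed such changes. A self-contained illustration (it assumes a SHA-1-style digest; the repository's actual crypto.GenerateSHA may differ, the point is only that BinaryData now influences the result):

    package main

    import (
        "crypto/sha1"
        "encoding/base64"
        "fmt"
        "sort"
        "strings"
    )

    // shaFromMaps mirrors the hashing scheme above for both Data and BinaryData.
    func shaFromMaps(data map[string]string, binaryData map[string][]byte) string {
        values := []string{}
        for k, v := range data {
            values = append(values, k+"="+v)
        }
        for k, v := range binaryData {
            values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
        }
        sort.Strings(values)
        return fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(values, ";"))))
    }

    func main() {
        a := shaFromMaps(map[string]string{"url": "www.google.com"}, nil)
        b := shaFromMaps(map[string]string{"url": "www.google.com"}, map[string][]byte{"cert": []byte("PEM")})
        fmt.Println(a == b) // false: BinaryData now changes the hash
    }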
@@ -2,6 +2,8 @@ package util

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

func TestConvertToEnvVarName(t *testing.T) {
@@ -11,3 +13,35 @@ func TestConvertToEnvVarName(t *testing.T) {
		t.Errorf("Failed to convert data into environment variable")
	}
}

func TestGetHashFromConfigMap(t *testing.T) {
	data := map[*v1.ConfigMap]string{
		{
			Data: map[string]string{"test": "test"},
		}: "Only Data",
		{
			Data:       map[string]string{"test": "test"},
			BinaryData: map[string][]byte{"bintest": []byte("test")},
		}: "Both Data and BinaryData",
		{
			BinaryData: map[string][]byte{"bintest": []byte("test")},
		}: "Only BinaryData",
	}
	converted := map[string]string{}
	for cm, cmName := range data {
		converted[cmName] = GetSHAfromConfigmap(cm)
	}

	// Test that the hash for each configmap is really unique
	for cmName, cmHash := range converted {
		count := 0
		for _, cmHash2 := range converted {
			if cmHash == cmHash2 {
				count++
			}
		}
		if count > 1 {
			t.Errorf("Found duplicate hashes for %v", cmName)
		}
	}
}

14
okteto.yml
Normal file
@@ -0,0 +1,14 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
  capabilities:
    add:
      - SYS_PTRACE
volumes:
  - /go/pkg/
  - /root/.cache/go-build/
sync:
  - .:/app
forward:
  - 2345:2345
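The new okteto.yml is a development manifest rather than a deployment artifact: with the Okteto CLI installed, running okteto up would start an okteto/golang:1 container, sync the repository into /app, keep module and build caches in the declared volumes, and forward port 2345, the default port of the Delve Go debugger; the SYS_PTRACE capability is what Delve needs to attach to a running process.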