Compare commits

...

47 Commits

Author SHA1 Message Date
stakater-user
7b44a472ad [skip-ci] Update artifacts 2021-01-26 11:12:27 +00:00
Ahmed Waleed Malik
a46b56271c Merge pull request #197 from stakater/fix-pipeline
Fix version env var in Makefile
2021-01-26 16:00:48 +05:00
Waleed Malik
2f9dd7c422 Fix version env var in Makefile 2021-01-26 15:57:25 +05:00
Ahmed Waleed Malik
f373686b75 Merge pull request #195 from stakater/fix-chart-path
Fix chart path for helm publish step
2021-01-26 15:36:32 +05:00
Ahmed Waleed Malik
80557ce43e Fix chart path for helm publish step 2021-01-26 15:34:58 +05:00
Ahmed Waleed Malik
c4f6d93eb9 Merge pull request #194 from stakater/ahmedwaleedmalik-patch-1
Update push.yaml
2021-01-26 15:17:16 +05:00
Ahmed Waleed Malik
c75c787738 Update push.yaml 2021-01-26 15:16:12 +05:00
Ahmed Waleed Malik
ba18bbfd72 Merge pull request #193 from stakater/update-chart-publish
Update chart publish step
2021-01-26 15:01:24 +05:00
Waleed Malik
610b4e5716 Update chart publish step 2021-01-26 14:56:06 +05:00
Ahmed Waleed Malik
dc0715de61 Merge pull request #192 from stakater/use-pr-target-hook
[skip-ci] use pull_request_target hook for pipelines against PRs
2021-01-26 14:51:15 +05:00
Waleed Malik
4f6ff420e8 Fix chart publish step 2021-01-26 14:31:04 +05:00
Waleed Malik
966d5e61c0 [skip-ci] use pull_request_target hook for pipelines against PRs 2021-01-26 14:10:56 +05:00
Ahmed Waleed Malik
d017747792 Merge pull request #189 from stakater/workflow-implementation
Workflow implementation
2021-01-26 14:10:10 +05:00
Waleed Malik
70099fdc8f Fix helm lint step 2021-01-26 13:54:14 +05:00
Waleed Malik
aaddec1103 Skip failing test cases 2021-01-26 12:59:49 +05:00
Waleed Malik
b5fdcd577d Refactor controller test cases 2021-01-26 12:04:40 +05:00
Waleed Malik
8b9bf07631 Temporarily switch to pull_request hook for testing 2021-01-26 11:42:40 +05:00
Waleed Malik
674444850d Merge latest master 2021-01-26 11:35:44 +05:00
Waleed Malik
e74dcc3cbd Update workflows 2021-01-26 11:30:38 +05:00
Waleed Malik
dcae4c98ac Add updated Dockerfile 2021-01-26 11:30:22 +05:00
Waleed Malik
94a83c5974 Bump golang version to 1.15 2021-01-26 11:14:52 +05:00
Waleed Malik
592976bf09 Run go mod tidy 2021-01-26 11:11:30 +05:00
Waleed Malik
ed736c8e20 Remove .VERSION file 2021-01-26 11:11:21 +05:00
Ahmed Waleed Malik
84133742b1 Merge pull request #186 from coldfire84/pr-docker-multi-arch
Enable support for multi-arch container image build/ publish: linux/arm, linux/arm64 and linux/amd64.
2021-01-26 11:09:00 +05:00
stakater-user
04e19a733b Bump Version to v0.0.77 2021-01-21 08:32:24 +00:00
Ahmed Waleed Malik
c1ae5efb7b Merge pull request #190 from gracedo/gracedo/check_api_legacy
[helm chart] Check api version availability instead of using legacy value
2021-01-21 13:00:23 +05:00
Grace Do
f630336fed Check api version availability instead of using legacy value 2021-01-19 10:18:27 -08:00
talha0324
fde312edcc Update golang code lint errors 2021-01-19 15:54:30 +05:00
talha0324
57eb4f4eaa Updates to the workflow 2021-01-19 15:22:31 +05:00
talha0324
1490a1feaa Updates to workflow and few path updates 2021-01-18 17:43:07 +05:00
talha0324
58c622eb91 Added workflow files for Jenkins replacement 2021-01-18 17:33:02 +05:00
stakater-user
2fd8b190b1 Bump Version to v0.0.76 2021-01-11 04:45:48 +00:00
Ahmed Waleed Malik
81c840ea30 Merge pull request #187 from stakater/fix-issue-166
Remove redundant reload on resource creation
2021-01-11 09:36:26 +05:00
faizanahmad055
21dbeb9810 Remove redundant reload on resource creation
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-01-08 17:28:50 +01:00
Chris Bradford
fba004d655 Enable support for multi-arch container images: arm, arm64 and amd64.
Add Container Build documentation.
2020-12-19 12:38:32 +00:00
stakater-user
631781aa8a Bump Version to v0.0.75 2020-12-01 12:11:52 +00:00
Ali Kahoot
707dccf6b8 Merge pull request #184 from stakater/fix-helm-chart-template
Fix helm chart template
2020-12-01 17:02:49 +05:00
Waleed Malik
5edd29b8e9 Remove redundant fields from service in helm chart 2020-12-01 16:58:51 +05:00
Waleed Malik
27815ea3b3 Update values.service.ports to values.service.port 2020-12-01 16:48:10 +05:00
Waleed Malik
5fd275a05c Add waleed as reviewer and approver 2020-12-01 16:39:11 +05:00
Waleed Malik
b22694d3c2 Add servicemonitor in values template 2020-12-01 16:38:55 +05:00
Ahmed Waleed Malik
5c95c6898b Merge pull request #180 from dpetersen/service-monitor
Add optional ServiceMonitor object to Helm chart
2020-12-01 15:08:00 +05:00
Jose Bautista
46bc4b71db update readme 2020-11-28 17:28:37 +02:00
Don Petersen
cee81b4757 Add optional ServiceMonitor object to Helm chart
This adds the ability to create a ServiceMonitor instance to configure
Prometheus to monitor reloader for metrics. ServiceMonitor is a CRD that
comes with the prometheus-operator project.
2020-11-25 13:25:07 -08:00
stakater-user
1cec52637f Bump Version to v0.0.74 2020-10-28 17:08:26 +00:00
Ahmed Waleed Malik
1901a4eb49 Merge pull request #146 from mnach/add-metrics-service
add metrics endpoints to kubernetes specs
2020-10-28 21:57:15 +05:00
Mikhail Vladimirovich Nacharov
710396f66e add metrics endpoints to kubernetes specs 2020-10-28 01:13:49 +05:00
34 changed files with 717 additions and 154 deletions

View File

@@ -1 +0,0 @@
version: v0.0.73

133
.github/workflows/pull_request.yaml vendored Normal file
View File

@@ -0,0 +1,133 @@
name: Pull Request
on:
pull_request_target:
branches:
- master
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
jobs:
build:
runs-on: ubuntu-latest
name: Build
if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
steps:
- name: Check out code
uses: actions/checkout@v2
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
- name: Set up Go
id: go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install Dependencies
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Helm Lint
run: |
cd deployments/kubernetes/chart/reloader
helm lint
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
sudo install ./kubectl /usr/local/bin/ && rm kubectl
kubectl version --short --client
kubectl version --short --client | grep -q ${KUBERNETES_VERSION}
- name: Install Kind
run: |
curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
sudo install ./kind /usr/local/bin && rm kind
kind version
kind version | grep -q ${KIND_VERSION}
- name: Create Kind Cluster
run: |
kind create cluster
kubectl cluster-info
- name: Test
run: make test
- name: Generate Tag
id: generate_tag
run: |
sha=${{ github.event.pull_request.head.sha }}
tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
echo "##[set-output name=GIT_TAG;]$(echo ${tag})"
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Registry
uses: docker/login-action@v1
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Generate image repository path
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and Push Docker Image
uses: docker/build-push-action@v2
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
tags: |
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Comment on PR
uses: mshick/add-pr-comment@v1
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
allow-repeats: false
- name: Notify Failure
if: failure()
uses: mshick/add-pr-comment@v1
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
allow-repeats: false
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always() # Pick up events even if the job fails or is canceled.
with:
status: ${{ job.status }}
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

174
.github/workflows/push.yaml vendored Normal file
View File

@@ -0,0 +1,174 @@
name: Push
on:
push:
branches:
- master
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
jobs:
build:
name: Build
if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
- name: Set up Go
id: go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install Dependencies
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
sudo install ./kubectl /usr/local/bin/ && rm kubectl
kubectl version --short --client
kubectl version --short --client | grep -q ${KUBERNETES_VERSION}
- name: Install Kind
run: |
curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
sudo install ./kind /usr/local/bin && rm kind
kind version
kind version | grep -q ${KIND_VERSION}
- name: Create Kind Cluster
run: |
kind create cluster
kubectl cluster-info
- name: Test
run: make test
- name: Generate Tag
id: generate_tag
uses: anothrNick/github-tag-action@1.26.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true
DEFAULT_BUMP: patch
DRY_RUN: true
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Registry
uses: docker/login-action@v1
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Generate image repository path
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
tags: |
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
##############################
## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
##############################
# Generate tag for operator without "v"
- name: Generate Operator Tag
id: generate_operator_tag
uses: anothrNick/github-tag-action@1.26.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: false
DEFAULT_BUMP: patch
DRY_RUN: true
# Update chart tag to the latest semver tag
- name: Update Chart Version
env:
VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
run: make bump-chart
# Publish helm chart
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master
with:
branch: master
repository: stakater-charts
target_dir: docs
token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
charts_dir: deployments/kubernetes/chart/
charts_url: ${{ env.HELM_REGISTRY_URL }}
owner: stakater
linting: on
commit_username: stakater-user
commit_email: stakater@gmail.com
# Commit back changes
- name: Commit files
run: |
git config --local user.email "stakater@gmail.com"
git config --local user.name "stakater-user"
git status
git add .
git commit -m "[skip-ci] Update artifacts" -a
- name: Push changes
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
branch: ${{ github.ref }}
- name: Push Latest Tag
uses: anothrNick/github-tag-action@1.26.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true
DEFAULT_BUMP: patch
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always() # Pick up events even if the job fails or is canceled.
with:
status: ${{ job.status }}
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

44
.github/workflows/release.yaml vendored Normal file
View File

@@ -0,0 +1,44 @@
name: Release Go project
on:
push:
tags:
- "v*"
env:
GOLANG_VERSION: 1.15.2
jobs:
build:
name: GoReleaser build
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
with:
fetch-depth: 0 # See: https://goreleaser.com/ci/actions/
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}
id: go
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@master
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always()
with:
status: ${{ job.status }}
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

31
Dockerfile Normal file
View File

@@ -0,0 +1,31 @@
# Build the manager binary
FROM golang:1.15.2 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY internal/ internal/
COPY pkg/ pkg/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=mod -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/manager"]

8
Jenkinsfile vendored
View File

@@ -1,8 +0,0 @@
#!/usr/bin/groovy
@Library('github.com/stakater/stakater-pipeline-library@v2.16.24') _
goBuildViaGoReleaser {
publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
toolsImage = 'stakater/pipeline-tools:v2.0.18'
}

View File

@@ -1,15 +1,20 @@
# note: call scripts from /scripts
.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy
.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
BUILDER ?= reloader-builder
OS ?= linux
ARCH ?= ???
ALL_ARCH ?= arm64 arm amd64
BUILDER ?= reloader-builder-${ARCH}
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
# Default value "dev"
DOCKER_TAG ?= 1.0.0
REPOSITORY = ${DOCKER_IMAGE}:${DOCKER_TAG}
TAG ?= v0.0.75.0
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
VERSION=$(shell cat .version)
VERSION ?= 0.0.1
BUILD=
GOCMD = go
@@ -25,10 +30,35 @@ build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
builder-image:
@docker build --network host -t "${BUILDER}" -f build/package/Dockerfile.build .
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
reloader-${ARCH}.tar:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
binary-image: builder-image
@docker run --network host --rm "${BUILDER}" | docker build --network host -t "${REPOSITORY}" -f Dockerfile.run -
cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
push:
docker push ${REPOSITORY_ARCH}
release: binary-image push manifest
release-all:
-rm -rf ~/.docker/manifests/*
# Make arch-specific release
@for arch in $(ALL_ARCH) ; do \
echo Make release: $$arch ; \
make release ARCH=$$arch ; \
done
set -e
docker manifest push --purge $(REPOSITORY_GENERIC)
manifest:
set -e
docker manifest create -a $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
test:
"$(GOCMD)" test -timeout 1800s -v ./...
@@ -37,15 +67,29 @@ stop:
@docker stop "${BINARY}"
clean-images: stop
@docker rmi "${BUILDER}" "${BINARY}"
-docker rmi "${BINARY}"
@for arch in $(ALL_ARCH) ; do \
echo Clean image: $$arch ; \
make clean-image ARCH=$$arch ; \
done
-docker rmi "${REPOSITORY_GENERIC}"
clean-image:
-docker rmi "${BUILDER}"
-docker rmi "${REPOSITORY_ARCH}"
-rm -rf ~/.docker/manifests/*
clean:
"$(GOCMD)" clean -i
push: ## push the latest Docker image to DockerHub
docker push $(REPOSITORY)
-rm -rf reloader-*.tar
apply:
kubectl apply -f deployments/manifests/ -n temp-reloader
deploy: binary-image push apply
# Bump Chart
bump-chart:
sed -i "s/^version:.*/version: $(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/^appVersion:.*/appVersion: $(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml

View File

@@ -22,7 +22,6 @@ Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades o
## Compatibility
Reloader is compatible with kubernetes >= 1.9
The `apiVersion: rbac.authorization.k8s.io/v1beta1` is depreciated since kubernetes = 1.17. To run it with older versions, please use the chart parameter `reloader.legacy.rbac=true`
## How to use Reloader
@@ -202,12 +201,6 @@ helm repo update
helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
```
**Note:** The latest verion of reloader is using `apiVersion: rbac.authorization.k8s.io/v1` for rbac. The `apiVersion: rbac.authorization.k8s.io/v1beta1` is depreciated since kubernetes = 1.17. To run it with older versions, please use below command.
```bash
helm install stakater/reloader --set reloader.legacy.rbac=true # For helm3 add --generate-name flag or set the release name
```
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in `test` namespace.
```bash
@@ -225,6 +218,8 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
## Help
### Documentation

View File

@@ -1,6 +1,8 @@
FROM golang:1.13.9-alpine
FROM golang:1.15.2-alpine
LABEL maintainer "Stakater Team"
ARG GOARCH=amd64
RUN apk -v --update \
--no-cache \
add git build-base
@@ -13,7 +15,7 @@ RUN go mod download
COPY . .
ENV CGO_ENABLED=0 GOOS=linux GOARCH=amd64
ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader

View File

@@ -8,4 +8,7 @@ COPY Reloader /bin/Reloader
# On alpine 'nobody' has uid 65534
USER 65534
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/bin/Reloader"]

View File

@@ -3,27 +3,29 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.73
appVersion: v0.0.73
version: 0.0.78
appVersion: 0.0.78
keywords:
- Reloader
- kubernetes
home: https://github.com/stakater/Reloader
sources:
- https://github.com/stakater/IngressMonitorController
- https://github.com/stakater/IngressMonitorController
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
maintainers:
- name: Stakater
email: hello@stakater.com
- name: rasheedamir
email: rasheed@aurorasolutions.io
- name: waseem-h
email: waseemhassan@stakater.com
- name: faizanahmad055
email: faizan.ahmad55@outlook.com
- name: kahootali
email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
email: ahmad@aurorasolutions.io
- name: ahsan-storm
email: ahsanmuhammad1@outlook.com
- name: Stakater
email: hello@stakater.com
- name: rasheedamir
email: rasheed@aurorasolutions.io
- name: waseem-h
email: waseemhassan@stakater.com
- name: faizanahmad055
email: faizan.ahmad55@outlook.com
- name: kahootali
email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
email: ahmad@aurorasolutions.io
- name: ahsan-storm
email: ahsanmuhammad1@outlook.com
- name: ahmedwaleedmalik
email: waleed@stakater.com

View File

@@ -5,6 +5,7 @@ approvers:
- waseem-h
- rasheedamir
- ahsan-storm
- ahmedwaleedmalik
reviewers:
- faizanahmad055
- kahootali
@@ -12,3 +13,4 @@ reviewers:
- waseem-h
- rasheedamir
- ahsan-storm
- ahmedwaleedmalik

View File

@@ -1,8 +1,8 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: ClusterRole
metadata:

View File

@@ -1,8 +1,8 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: ClusterRoleBinding
metadata:

View File

@@ -89,6 +89,19 @@ spec:
fieldPath: metadata.namespace
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/

View File

@@ -1,8 +1,8 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: Role
metadata:

View File

@@ -1,14 +1,14 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: RoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}

View File

@@ -22,5 +22,8 @@ spec:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
ports:
{{ toYaml .Values.reloader.service.ports | indent 4 }}
{{- end }}
- port: {{ .Values.reloader.service.port }}
name: http
protocol: TCP
targetPort: http
{{- end }}

View File

@@ -0,0 +1,31 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.serviceMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceMonitor.labels }}
{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.serviceMonitor.namespace }}
namespace: {{ .Values.reloader.serviceMonitor.namespace }}
{{- end }}
spec:
endpoints:
- targetPort: http
path: "/metrics"
{{- if .Values.reloader.serviceMonitor.interval }}
interval: {{ .Values.reloader.serviceMonitor.interval }}
{{- end }}
{{- if .Values.reloader.serviceMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -51,10 +51,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.73
version: v0.0.77
image:
name: stakater/reloader
tag: "v0.0.73"
tag: v0.0.78
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -81,11 +81,7 @@ reloader:
service: {}
# labels: {}
# annotations: {}
# ports:
# - port: 9090
# name: http
# protocol: TCP
# targetPort: 9090
# port: 9090
rbac:
enabled: true
@@ -104,3 +100,15 @@ reloader:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}
serviceMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

View File

@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"

View File

@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"

View File

@@ -8,13 +8,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.73
version: v0.0.77
name: reloader-reloader
spec:
@@ -28,19 +28,31 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.73
version: v0.0.77
spec:
containers:
- image: "stakater/reloader:v0.0.73"
- image: "stakater/reloader:v0.0.77"
imagePullPolicy: IfNotPresent
name: reloader-reloader
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
securityContext:
runAsNonRoot: true
runAsUser: 65534

View File

@@ -1,3 +1,4 @@
---
# Source: reloader/templates/service.yaml

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"

View File

@@ -0,0 +1,4 @@
---
# Source: reloader/templates/servicemonitor.yaml

View File

@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
@@ -50,7 +50,7 @@ rules:
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -58,7 +58,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
@@ -83,13 +83,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.73
version: v0.0.77
name: reloader-reloader
spec:
@@ -103,19 +103,31 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.73
version: v0.0.77
spec:
containers:
- image: "stakater/reloader:v0.0.73"
- image: "stakater/reloader:v0.0.77"
imagePullPolicy: IfNotPresent
name: reloader-reloader
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
securityContext:
runAsNonRoot: true
runAsUser: 65534
@@ -133,6 +145,7 @@ spec:
---
# Source: reloader/templates/service.yaml
---
# Source: reloader/templates/serviceaccount.yaml
@@ -144,9 +157,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader
---
# Source: reloader/templates/servicemonitor.yaml

View File

@@ -81,11 +81,7 @@ reloader:
service: {}
# labels: {}
# annotations: {}
# ports:
# - port: 9090
# name: http
# protocol: TCP
# targetPort: 9090
# port: 9090
rbac:
enabled: true
@@ -104,3 +100,15 @@ reloader:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}
serviceMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

41
docs/Container Build.md Normal file
View File

@@ -0,0 +1,41 @@
# Container Build
> **WARNING:** As a user of Reloader there is no need to build containers, these are freely available here: https://hub.docker.com/r/stakater/reloader/
Multi-architecture approach is based on original work by @mdh02038: https://github.com/mdh02038/Reloader
Images tested on linux/arm, linux/arm64 and linux/amd64.
# Install Pre-Reqs
The build environment requires the following packages (tested on Ubuntu 20.04):
* golang
* make
* qemu (for arm, arm64 etc. emulation)
* binfmt-support
* Docker engine
## Docker
Follow instructions here: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository
Once installed, enable the experimental CLI:
```
export DOCKER_CLI_EXPERIMENTAL=enabled
```
Login, to enable publishing of packages:
```
sudo docker login
```
## Remaining Pre-Reqs
Remaining Pre-Reqs can be installed via:
```
sudo apt install golang make qemu-user-static binfmt-support -y
```
# Publish Multi-Architecture Image
To build/ publish multi-arch Docker images clone repository and execute from repository root:
```
sudo make release-all
```
# Additional Links/ Info
* *https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408

4
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/stakater/Reloader
go 1.13
go 1.15
require (
github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
@@ -12,8 +12,6 @@ require (
github.com/prometheus/client_golang v1.4.1
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90

7
go.sum
View File

@@ -161,8 +161,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -239,6 +237,7 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
@@ -249,8 +248,6 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -258,8 +255,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=

View File

@@ -57,12 +57,8 @@ func NewController(
// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
if !c.resourceInIgnoredNamespace(obj) {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Collectors: c.collectors,
})
}
// Not required as reloader should update the resource in the event of any change and not in the event of any resource creation.
// This causes the issue where reloader reloads the pods when reloader itself gets restarted as it's queue is filled with all the k8s objects as new resources.
}
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {

View File

@@ -1,11 +1,12 @@
package controller
import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"os"
"testing"
"time"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/handler"
@@ -29,6 +30,10 @@ var (
collectors = metrics.NewCollectors()
)
const (
sleepDuration = 3 * time.Second
)
func TestMain(m *testing.M) {
testutil.CreateNamespace(namespace, clients.KubernetesClient)
@@ -45,7 +50,7 @@ func TestMain(m *testing.M) {
defer close(stop)
go c.Run(1, stop)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
logrus.Infof("Running Testcases")
retCode := m.Run()
@@ -95,7 +100,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
if !updated {
t.Errorf("DeploymentConfig was not updated")
}
time.Sleep(5 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
@@ -108,7 +113,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -147,7 +152,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -160,7 +165,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -199,7 +204,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -212,12 +217,15 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon creating the configmap
func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
// TODO: Fix this test case
t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")
// Creating configmap
configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5)
_, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -237,14 +245,14 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com")
if err != nil {
t.Errorf("Error while creating the configmap second time %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
@@ -260,7 +268,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -273,7 +281,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and update env var upon updating the configmap
@@ -319,7 +327,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -332,7 +340,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap
@@ -370,7 +378,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
if updated {
t.Errorf("Deployment should not be updated by changing label")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -383,11 +391,15 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create a env var upon creating the secret
func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
// TODO: Fix this test case
t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")
// Creating secret
secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5)
_, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -406,14 +418,14 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData)
if err != nil {
t.Errorf("Error in secret creation: %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
@@ -425,7 +437,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
Annotation: options.SecretUpdateOnChangeAnnotation,
}
deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
@@ -442,7 +454,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create a env var upon updating the secret
@@ -492,7 +504,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and update env var upon updating the secret
@@ -548,7 +560,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
@@ -597,7 +609,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap
@@ -635,7 +647,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -648,7 +660,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap
@@ -672,7 +684,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
t.Errorf("Configmap was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Updating configmap for second time
updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io")
@@ -680,7 +692,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
t.Errorf("Configmap was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Verifying DaemonSet update
logrus.Infof("Verifying env var has been updated")
@@ -696,7 +708,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -709,7 +721,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create a env var upon updating the secret
@@ -759,7 +771,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on DaemonSet and update env var upon updating the secret
@@ -782,7 +794,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
if err != nil {
t.Errorf("Error while updating secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Updating Secret
err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData)
@@ -816,7 +828,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
@@ -865,7 +877,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap
@@ -903,7 +915,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting StatefulSet
err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -916,7 +928,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap
@@ -960,7 +972,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting StatefulSet
err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -973,7 +985,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create a env var upon updating the secret
@@ -1023,7 +1035,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on StatefulSet and update env var upon updating the secret
@@ -1079,7 +1091,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
func TestController_resourceInIgnoredNamespace(t *testing.T) {

View File

@@ -15,7 +15,6 @@ import (
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testclient "k8s.io/client-go/kubernetes/fake"
)
@@ -657,11 +656,12 @@ func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume(t *testing.T)
}
}
func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
configmapObj.Annotations = annotations
return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
}
// Un-used function
// func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
// configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
// configmapObj.Annotations = annotations
// return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
// }
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
@@ -720,7 +720,10 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t
if err != nil {
t.Errorf("Failed to create deployment with search annotation.")
}
defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
defer func() {
_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")