Compare commits

..

10 Commits

Author SHA1 Message Date
stakater-user
4d6da476ee Bump Version to 0.0.12 2018-08-10 00:34:51 +00:00
Ahmad Iqbal Ali
d5ea5d810d add docs link in README (#21) 2018-08-10 05:13:30 +05:00
stakater-user
0d10b35d3a Bump Version to 0.0.11 2018-08-03 21:38:04 +00:00
Rasheed Amir
807e0c0c1b cleanup the readme 2018-08-03 23:16:33 +02:00
stakater-user
052bbb23e5 Bump Version to 0.0.10 2018-08-02 10:27:58 +00:00
Ahmad Iqbal Ali
28fb50598c use generic slack details (#18)
since we don't have a reloader channel at the moment.
2018-08-02 12:05:53 +02:00
stakater-user
a3e4c3a4d7 Bump Version to 0.0.9 2018-08-02 09:39:08 +00:00
Faizan Ahmad
d3bae0d3bb Optimize logging in reloader (#19)
* Optimize logging in reloader

* Fix test case failing issue

* Implement PR-19 review comments

* Place the log out of loop

* Fix change detection log
2018-08-02 11:17:35 +02:00
stakater-user
64d12a7c31 Bump Version to 0.0.8 2018-08-01 19:11:12 +00:00
Faizan Ahmad
078fc034d2 Add doc how to verify reloader working (#20)
* Add doc how to verify reloader working

* fix the text
2018-08-01 20:49:43 +02:00
14 changed files with 119 additions and 97 deletions

View File

@@ -1 +1 @@
0.0.7
0.0.12

View File

@@ -42,6 +42,8 @@ You can apply vanilla manifests by running the following command
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```
By default Reloader gets deployed in the `default` namespace and watches changes in `secrets` and `configmaps` in all namespaces.
### Helm Charts
Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands
@@ -54,21 +56,20 @@ helm repo update
helm install stakater/reloader
```
## Monitor All namespaces
By default Reloader gets deployed in the `default` namespace and watches changes in `secrets` and `configmaps` in all namespaces.
## Help
**Got a question?**
### Documentation
You can find more documentation [here](docs/)
### Have a question?
File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us an [email](mailto:stakater@gmail.com).
### Talk to us on Slack
Join and talk to us on the #tools-imc channel for discussing Reloader
Join and talk to us on Slack for discussing Reloader
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://stakater-slack.herokuapp.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater.slack.com/messages/CAN960CTG/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater.slack.com/)
## Contributing

View File

@@ -3,7 +3,7 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 0.0.7
version: 0.0.12
keywords:
- Reloader
- kubernetes

View File

@@ -7,9 +7,9 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: 0.0.7
version: 0.0.12
image:
name: stakater/reloader
tag: "0.0.7"
tag: "0.0.12"
pullPolicy: IfNotPresent
watchGlobally: true

View File

@@ -7,8 +7,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- env:
image: "stakater/reloader:0.0.7"
image: "stakater/reloader:0.0.12"
imagePullPolicy: IfNotPresent
name: reloader
serviceAccountName: reloader

View File

@@ -7,8 +7,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
@@ -20,8 +20,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role
@@ -57,8 +57,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role-binding

View File

@@ -7,8 +7,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- env:
image: "stakater/reloader:0.0.7"
image: "stakater/reloader:0.0.12"
imagePullPolicy: IfNotPresent
name: reloader
serviceAccountName: reloader
@@ -43,8 +43,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
@@ -56,8 +56,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role
@@ -93,8 +93,8 @@ metadata:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.7
chart: "reloader-0.0.7"
version: 0.0.12
chart: "reloader-0.0.12"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role-binding

View File

@@ -0,0 +1,51 @@
# Verify Reloader's Working
Reloader's working can be verified in two ways.
## Verify from logs
Check the logs of Reloader and verify that you can see logs like the ones below; if you can find these logs, it means Reloader is working.
```text
Changes Detected in test-object of type 'SECRET' in namespace: test-reloader
Updated test-resource of type Deployment in namespace: test-reloader
```
Below are the details that explain these logs:
### test-object
`test-object` is the name of a `secret` or a `deployment` in which change has been detected.
### SECRET
`SECRET` is the type of `test-object`. It can either be `SECRET` or `CONFIGMAP`
### test-reloader
`test-reloader` is the name of the namespace in which Reloader has detected the change.
### test-resource
`test-resource` is the name of the resource that is going to be updated
### Deployment
`Deployment` is the type of `test-resource`. It can either be a `Deployment`, `Daemonset` or `Statefulset`
## Verify by checking the age of Pod
A pod's age can tell whether Reloader is working correctly or not. If you know that a change in a `secret` or `configmap` has occurred, then check the relevant Pod's age immediately. It should have been newly created a few moments ago.
### Verify from kubernetes Dashboard
The `kubernetes dashboard` can be used to verify the working of Reloader. After a change in a `secret` or `configmap`, check the relevant Pod's age from the dashboard. It should have been newly created a few moments ago.
### Verify from command line
After a change in a `secret` or `configmap`, run the command below and verify that the pod is newly created.
```bash
kubectl get pods <pod name> -n <namespace name>
```

View File

@@ -2,9 +2,9 @@ package constants
const (
// ConfigmapEnvVarPostfix is a postfix for configmap envVar
ConfigmapEnvVarPostfix = "_CONFIGMAP"
ConfigmapEnvVarPostfix = "CONFIGMAP"
// SecretEnvVarPostfix is a postfix for secret envVar
SecretEnvVarPostfix = "_SECRET"
SecretEnvVarPostfix = "SECRET"
// EnvVarPrefix is a Prefix for environment variable
EnvVarPrefix = "STAKATER_"
)

View File

@@ -25,7 +25,6 @@ var (
func TestMain(m *testing.M) {
logrus.Infof("Creating namespace %s", namespace)
testutil.CreateNamespace(namespace, client)
logrus.Infof("Creating controller")
@@ -45,7 +44,6 @@ func TestMain(m *testing.M) {
logrus.Infof("Running Testcases")
retCode := m.Run()
logrus.Infof("Deleting namespace %q.\n", namespace)
testutil.DeleteNamespace(namespace, client)
os.Exit(retCode)
@@ -268,7 +266,6 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
//time.Sleep(5 * time.Second)
// Deleting Deployment
err = testutil.DeleteDeployment(client, namespace, secretName)
@@ -330,7 +327,6 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
//time.Sleep(5 * time.Second)
// Deleting Deployment
err = testutil.DeleteDeployment(client, namespace, secretName)
@@ -385,7 +381,6 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t
if updated {
t.Errorf("Deployment should not be updated by changing label in secret")
}
//time.Sleep(5 * time.Second)
// Deleting Deployment
err = testutil.DeleteDeployment(client, namespace, secretName)
@@ -559,7 +554,6 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
//time.Sleep(5 * time.Second)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(client, namespace, secretName)
@@ -622,7 +616,6 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
//time.Sleep(5 * time.Second)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(client, namespace, secretName)
@@ -677,7 +670,6 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *
if updated {
t.Errorf("DaemonSet should not be updated by changing label in secret")
}
//time.Sleep(5 * time.Second)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(client, namespace, secretName)
@@ -851,7 +843,6 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
//time.Sleep(5 * time.Second)
// Deleting StatefulSet
err = testutil.DeleteStatefulSet(client, namespace, secretName)
@@ -913,7 +904,6 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
//time.Sleep(5 * time.Second)
// Deleting StatefulSet
err = testutil.DeleteStatefulSet(client, namespace, secretName)

View File

@@ -2,7 +2,6 @@ package handler
import (
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
)
// ResourceCreatedHandler contains new objects
@@ -13,17 +12,7 @@ type ResourceCreatedHandler struct {
// Handle processes the newly created resource
func (r ResourceCreatedHandler) Handle() error {
if r.Resource == nil {
logrus.Errorf("Error in Handler")
} else {
logrus.Infof("Detected changes in object %s", r.Resource)
// process resource based on its type
if _, ok := r.Resource.(*v1.ConfigMap); ok {
logrus.Infof("A 'configmap' has been 'Added' but no implementation found to take action")
} else if _, ok := r.Resource.(*v1.Secret); ok {
logrus.Infof("A 'secret' has been 'Added' but no implementation found to take action")
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
logrus.Errorf("Resource creation handler received nil resource")
}
return nil
}

View File

@@ -23,47 +23,44 @@ type ResourceUpdatedHandler struct {
// Handle processes the updated resource
func (r ResourceUpdatedHandler) Handle() error {
if r.Resource == nil || r.OldResource == nil {
logrus.Errorf("Error in Handler")
logrus.Errorf("Resource update handler received nil resource")
} else {
logrus.Infof("Detected changes in object %s", r.Resource)
// process resource based on its type
rollingUpgrade(r, callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentItems,
ContainersFunc: callbacks.GetDeploymentContainers,
UpdateFunc: callbacks.UpdateDeployment,
ResourceType: "Deployment",
})
rollingUpgrade(r, callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDaemonSetItems,
ContainersFunc: callbacks.GetDaemonSetContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
ResourceType: "DaemonSet",
})
rollingUpgrade(r, callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetStatefulSetItems,
ContainersFunc: callbacks.GetStatefulsetContainers,
UpdateFunc: callbacks.UpdateStatefulset,
ResourceType: "StatefulSet",
})
config, envVarPostfix, oldSHAData := getConfig(r)
if config.SHAValue != oldSHAData {
logrus.Infof("Changes detected in %s of type '%s' in namespace: %s", config.ResourceName, envVarPostfix, config.Namespace)
// process resource based on its type
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentItems,
ContainersFunc: callbacks.GetDeploymentContainers,
UpdateFunc: callbacks.UpdateDeployment,
ResourceType: "Deployment",
})
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDaemonSetItems,
ContainersFunc: callbacks.GetDaemonSetContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
ResourceType: "DaemonSet",
})
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetStatefulSetItems,
ContainersFunc: callbacks.GetStatefulsetContainers,
UpdateFunc: callbacks.UpdateStatefulset,
ResourceType: "StatefulSet",
})
}
}
return nil
}
func rollingUpgrade(r ResourceUpdatedHandler, upgradeFuncs callbacks.RollingUpgradeFuncs) {
func rollingUpgrade(r ResourceUpdatedHandler, config util.Config, envarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) {
client, err := kube.GetClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
config, envVarPostfix, oldSHAData := getConfig(r)
if config.SHAValue != oldSHAData {
err = PerformRollingUpgrade(client, config, envVarPostfix, upgradeFuncs)
if err != nil {
logrus.Fatalf("Rolling upgrade failed with error = %v", err)
}
} else {
logrus.Infof("Rolling upgrade will not happend because no actual change in data has been detected")
err = PerformRollingUpgrade(client, config, envarPostfix, upgradeFuncs)
if err != nil {
logrus.Errorf("Rolling upgrade for %s failed with error = %v", config.ResourceName, err)
}
}
@@ -71,12 +68,10 @@ func getConfig(r ResourceUpdatedHandler) (util.Config, string, string) {
var oldSHAData, envVarPostfix string
var config util.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
logrus.Infof("Performing 'Updated' action for resource of type 'configmap'")
oldSHAData = getSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
config = getConfigmapConfig(r)
envVarPostfix = constants.ConfigmapEnvVarPostfix
} else if _, ok := r.Resource.(*v1.Secret); ok {
logrus.Infof("Performing 'Updated' action for resource of type 'secret'")
oldSHAData = getSHAfromSecret(r.OldResource.(*v1.Secret).Data)
config = getSecretConfig(r)
envVarPostfix = constants.SecretEnvVarPostfix
@@ -112,6 +107,7 @@ func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, enva
var err error
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
resourceName := util.ToObjectMeta(i).Name
// find correct annotation and update the resource
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
if annotationValue != "" {
@@ -120,13 +116,13 @@ func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, enva
if value == config.ResourceName {
updated := updateContainers(containers, value, config.SHAValue, envarPostfix)
if !updated {
logrus.Warnf("Rolling upgrade did not happen")
logrus.Warnf("Rolling upgrade failed because no container found to add environment variable in %s of type %s in namespace: %s", resourceName, upgradeFuncs.ResourceType, config.Namespace)
} else {
err = upgradeFuncs.UpdateFunc(client, config.Namespace, i)
if err != nil {
logrus.Errorf("Update %s failed %v", upgradeFuncs.ResourceType, err)
logrus.Errorf("Update for %s of type %s in namespace %s failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
} else {
logrus.Infof("Updated %s of type %s", config.ResourceName, upgradeFuncs.ResourceType)
logrus.Infof("Updated %s of type %s in namespace: %s ", resourceName, upgradeFuncs.ResourceType, config.Namespace)
}
break
}
@@ -139,8 +135,7 @@ func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, enva
func updateContainers(containers []v1.Container, annotationValue string, shaData string, envarPostfix string) bool {
updated := false
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + envarPostfix
logrus.Infof("Generated environment variable: %s", envar)
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue)+ "_" + envarPostfix
for i := range containers {
envs := containers[i].Env
@@ -155,7 +150,6 @@ func updateContainers(containers []v1.Container, annotationValue string, shaData
}
containers[i].Env = append(containers[i].Env, e)
updated = true
logrus.Infof("%s environment variable does not exist, creating a new envVar", envar)
}
}
return updated
@@ -164,9 +158,7 @@ func updateContainers(containers []v1.Container, annotationValue string, shaData
func updateEnvVar(envs []v1.EnvVar, envar string, shaData string) bool {
for j := range envs {
if envs[j].Name == envar {
logrus.Infof("%s environment variable found", envar)
if envs[j].Value != shaData {
logrus.Infof("Updating %s", envar)
envs[j].Value = shaData
return true
}

View File

@@ -22,7 +22,7 @@ var (
func TestMain(m *testing.M) {
logrus.Infof("Creating namespace %s", namespace)
// Creating namespace
testutil.CreateNamespace(namespace, client)
logrus.Infof("Setting up the test resources")

View File

@@ -236,7 +236,6 @@ func GetResourceSHA(containers []v1.Container, envar string) string {
//ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
logrus.Infof("Generating SHA for secret data")
if resourceType == SecretResourceType {
secret := GetSecret(namespace, resourceName, data)
for k, v := range secret.Data {
@@ -391,7 +390,7 @@ func VerifyResourceUpdate(client kubernetes.Interface, config util.Config, envVa
}
}
if matches {
envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + envVarPostfix
envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + "_" + envVarPostfix
updated := GetResourceSHA(containers, envName)
if updated == config.SHAValue {