update doc

This commit is contained in:
Eugenio Marzo
2026-03-14 14:22:35 +01:00
parent 28bfb3fc57
commit c104bb4fdc
12 changed files with 292 additions and 462 deletions

333
README.md
View File

@@ -17,20 +17,15 @@ Here are the slides (https://www.slideshare.net/EugenioMarzo/kubeinvaders-chaos-
# Table of Contents
1. [Description](#Description)
2. [Installation - Helm with ClusterIP Service + Nginx Ingress](#Installation-default)
2. [Installation - Helm with NodePort Service](#Installation-nodeport)
2. [Installation - Using Podman or Docker](#Installation-podman)
3. [Usage](#Usage)
4. [URL Monitoring During Chaos Session](#URL-Monitoring-During-Chaos-Session)
5. [Persistence](#Persistence)
6. [Generic Troubleshooting & Known Problems](#Generic-Troubleshooting-And-Known-Problems)
7. [Troubleshooting Unknown Namespace](#Troubleshooting-Unknown-Namespace)
8. [Metrics](#Metrics)
9. [Security](#Security)
10. [Roadmap](#Roadmap)
11. [Community](#Community)
12. [Community blogs and videos](#Community-blogs-and-videos)
13. [License](#License)
9. [Community](#Community)
10. [Community blogs and videos](#Community-blogs-and-videos)
11. [License](#License)
## Description
@@ -38,212 +33,29 @@ Inspired by the classic Space Invaders game, Kubeinvaders offers a playful and e
## Installation-default
If you need a lab kubernetes cluster you can use this setup via Make and Minikube. Follow [this readme](./minikube-setup/README.md)
**Helm installation is currently not supported.**
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/kubeinvaders)](https://artifacthub.io/packages/search?repo=kubeinvaders)
The easiest way to run KubeInvaders is directly with Podman or Docker.
Run with Podman:
```bash
helm repo add kubeinvaders https://lucky-sideburn.github.io/helm-charts/
helm repo update
kubectl create namespace kubeinvaders
# With ingress and TLS enabled
helm install --set-string config.target_namespace="namespace1\,namespace2" --set ingress.enabled=true --set ingress.hostName=kubeinvaders.local --set deployment.image.tag=latest -n kubeinvaders kubeinvaders kubeinvaders/kubeinvaders --set ingress.tls_enabled=true
# With ingress enabled but TLS disabled (in case you have a reverse-proxy that does TLS termination and nginx controller in http)
helm install --set-string config.target_namespace="namespace1\,namespace2" --set ingress.enabled=true --set ingress.hostName=kubeinvaders.local --set deployment.image.tag=latest -n kubeinvaders kubeinvaders kubeinvaders/kubeinvaders/ --set ingress.tls_enabled=false
podman run -p 8080:8080 docker.io/luckysideburn/kubeinvaders:latest
```
### Example for K3S
Run with Docker:
```bash
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik" sh -s -
cat >/tmp/ingress-nginx.yaml <<EOF
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: ingress-nginx
namespace: kube-system
spec:
chart: ingress-nginx
repo: https://kubernetes.github.io/ingress-nginx
targetNamespace: ingress-nginx
version: v4.9.0
set:
valuesContent: |-
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
hostNetwork: true
hostPort:
enabled: true
service:
enabled: false
publishService:
enabled: false
metrics:
enabled: false
serviceMonitor:
enabled: false
config:
use-forwarded-headers: "true"
EOF
kubectl create -f /tmp/ingress-nginx.yaml
kubectl create ns namespace1
kubectl create ns namespace2
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
helm install kubeinvaders --set-string config.target_namespace="namespace1\,namespace2" \
-n kubeinvaders kubeinvaders/kubeinvaders --set ingress.enabled=true --set ingress.hostName=kubeinvaders.io --set deployment.image.tag=latest
docker run --rm -p 8080:8080 docker.io/luckysideburn/kubeinvaders:latest
```
### Install to Kubernetes with Helm (v3+) - LoadBalancer / HTTP (tested with GKE)
Then open:
```bash
helm install kubeinvaders --set-string config.target_namespace="namespace1\,namespace2" -n kubeinvaders kubeinvaders/kubeinvaders --set ingress.enabled=true --set ingress.hostName=kubeinvaders.local --set deployment.image.tag=latest --set service.type=LoadBalancer --set service.port=80
kubectl set env deployment/kubeinvaders DISABLE_TLS=true -n kubeinvaders
http://localhost:8080
```
### SCC for Openshift
```bash
oc adm policy add-scc-to-user anyuid -z kubeinvaders
```
### Route for Openshift
```bash
apiVersion: route.openshift.io/v1
kind: Route
metadata:
name: kubeinvaders
namespace: "kubeinvaders"
spec:
host: "kubeinvaders.io"
to:
name: kubeinvaders
tls:
termination: Edge
```
## Add simple nginx Deployment for Pods to shot at
```bash
cat >deployment.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
selector:
matchLabels:
app: nginx
replicas: 20 # tells deployment to run 20 pods matching the template
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.24.0
ports:
- containerPort: 81
EOF
```
Apply Nginx Deployment in namespace1 and namespace2
```bash
sudo kubectl apply -f deployment.yaml -n namespace1
sudo kubectl apply -f deployment.yaml -n namespace2
```
## Installation-nodeport
Let's say we have a Layer 4 or Layer 7 Load Balancer that redirects traffic directly to the KubeInvaders Service Node Port.
For example, consider this HaProxy configuration. We don't want to use TLS in this scenario (just for experimentation).
Remember to disable TLS: **kubectl set env deployment/kubeinvaders DISABLE_TLS=true -n kubeinvaders**
(TODO: put this into values of the Helm)
**HAProxy Configuration**
```bash
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
# utilize system-wide crypto-policies
ssl-default-bind-ciphers PROFILE=SYSTEM
ssl-default-server-ciphers PROFILE=SYSTEM
defaults
mode tcp
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend mylb
bind *:80
default_backend mynodeport
backend mynodeport
balance roundrobin
```
**Installation steps using NodePort**
```bash
helm repo add kubeinvaders https://lucky-sideburn.github.io/helm-charts/ && helm repo list
VERSION=latest
helm install kubeinvaders kubeinvaders/kubeinvaders \
--version=$VERSION \
--namespace kubeinvaders \
--create-namespace \
--set service.type=NodePort \
--set service.nodePort=30016 \
--set ingress.enabled=false \
--set config.target_namespace="default\,namespace1" \
--set route_host=foobar.local
kubectl set env deployment/kubeinvaders DISABLE_TLS=true -n kubeinvaders
```
## Installation-podman
### Run through Docker or Podman
If you want to run KubeInvaders against your own Kubernetes cluster, create the required RBAC components (assumes k8s v1.24+):
Create the required components (assumes k8s v1.24+):
@@ -353,72 +165,6 @@ Create two namespaces:
kubectl create namespace namespace1
kubectl create namespace namespace2
```
Run the container:
```bash
podman run -p 8080:8080 \
--env K8S_TOKEN=**** \
--env APPLICATION_URL=http://localhost:8080 \
--env DISABLE_TLS=true \
--env KUBERNETES_SERVICE_HOST=10.10.10.4 \
--env KUBERNETES_SERVICE_PORT_HTTPS=6443 \
--env NAMESPACE=namespace1,namespace2 \
luckysideburn/kubeinvaders:latest
```
Given this example, you can access k-inv at the following address: [http://localhost:8080](http://localhost:8080)
- Please pay attention to the command "podman run -p 8080:8080". Forwarding port 8080 is important.
- We suggest using `DISABLE_TLS=true` for local development environments.
- Follow the instructions above to create the token for `K8S_TOKEN`.
- In the example, we use image tag `latest`, use `latest_debug` for debugging.
#### Params
##### K8S_TOKEN
These are the permissions your service account must have. You can take an example from [this clusterrole](https://github.com/lucky-sideburn/kubeinvaders/blob/master/helm-charts/kubeinvaders/templates/rbac-cluster.yaml).
- apiGroups: [""]
resources: ["pods", "pods/log"]
verbs: ["delete"]
- apiGroups: ["batch", "extensions"]
resources: ["jobs"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "watch", "list"]
##### APPLICATION_URL
URL of the web console
##### DISABLE_TLS
Disable HTTPS for the web console
##### KUBERNETES_SERVICE_HOST
IP address or DNS name of your control plane.
##### KUBERNETES_SERVICE_PORT_HTTPS
TCP port of the target control plane.
##### NAMESPACE
List the namespaces you want to stress or on which you want to see logs (logs are a beta feature, they might not work or could slow down the browser...).
```bash
podman run -p 8080:8080 \
--env K8S_TOKEN=*** \
--env APPLICATION_URL=http://localhost:8080 \
--env DISABLE_TLS=true \
--env KUBERNETES_SERVICE_HOST=10.10.10.4 \
--env KUBERNETES_SERVICE_PORT_HTTPS=6443 \
--env NAMESPACE=namespace1,namespace2 \
luckysideburn/kubeinvaders:latest
```
## Usage
@@ -488,7 +234,7 @@ Follow real time charts during the experiment
K-inv uses Redis to save and manage data. Redis is configured with "appendonly."
Currently, the Helm chart does not support PersistentVolumes, but this task is on the to-do list...
The legacy Helm chart does not support PersistentVolumes.
## Generic Troubleshooting and Known Problems
- If you don't see aliens, please follow these steps: ![Alt Text](https://github.com/lucky-sideburn/kubeinvaders/issues/100#event-18433067619)
@@ -503,7 +249,7 @@ Currently, the Helm chart does not support PersistentVolumes, but this task is o
## Troubleshooting Unknown Namespace
- Check if the namespaces declared with helm config.target_namespace (e.g., config.target_namespace="namespace1\,namespace2") exist and contain some pods.
- Check if the namespaces configured in the UI (for example: namespace1,namespace2) exist and contain pods.
- Check your browser's developer console for any failed HTTP requests (send them to luckysideburn[at]gmail[dot]com or open an issue on this repo).
- Try using latest_debug and send logs to luckysideburn[at]gmail[dot]com or open an issue on this repo.
@@ -536,55 +282,6 @@ Example of metrics:
![Alt Text](./doc_images/grafana2.png)
## Security
To restrict access to the KubeInvaders endpoint, add this annotation to the ingress.
```yaml
nginx.ingress.kubernetes.io/whitelist-source-range: <your_ip>/32
```
## Roadmap
Roadmap: Chaos Engineering Platform Enhancement
Phase 1: Authentication and Authorization
Implement robust user authentication:
Allow for both local and external authentication (e.g., LDAP, OAuth)
Securely store user credentials
Introduce role-based access control (RBAC):
Define granular permissions based on user roles (e.g., admin, engineer, viewer)
Enforce authorization at the resource level (namespaces, experiments, etc.)
Phase 2: Analytics and Reporting
Develop namespace-specific statistics:
Track the frequency of chaos engineering sessions per namespace
Visualize trends and patterns over time
Create comprehensive reports:
Generate customizable reports for management
Include metrics on experiment coverage, success rates, and failure rates
Export reporting data:
Allow for data export in various formats (e.g., CSV, JSON, PDF)
Phase 3: API Development
Expose platform functionality via a RESTful API:
Enable integration with other tools and systems
Support CRUD operations for core entities (experiments, scenarios, etc.)
Phase 4: UI Enhancements
Improve user experience:
Redesign the UI for better usability and aesthetics
Optimize performance and responsiveness
Phase 5: LLM Integration for Experiment Creation
Integrate an LLM: Develop an interface that allows users to describe experiments in natural language.
Translate to code: Utilize the LLM to translate natural language descriptions into executable code.
Validate and optimize: Implement mechanisms to validate and optimize the code generated by the LLM.
## Community
Please reach out for news, bugs, feature requests, and other issues via:

View File

@@ -808,7 +808,7 @@ k8s_jobs:
<div id="alert_placeholder" style="margin-top: 3%;"></div>
</div>
<div class="row" style="margin-top: 2%; text-align: center; margin: auto; margin-top: 1%;">
<canvas id="myCanvas" width="1200" height="800" class="game-canvas"></canvas>
<canvas id="myCanvas" width="1200" height="800" class="game-canvas" style="background-color: #000000;"></canvas>
</div>
<div class="row">
<div class="col text-center">

View File

@@ -18,11 +18,17 @@
// Copy the ingress host chosen in the <select> element into the
// chaos-report URL input. The placeholder entry "No Ingress found" is
// ignored so a manually typed URL is never overwritten by it.
function setChaosReportURL(select) {
  var chosen = select.options[select.selectedIndex].value;
  if (chosen === "No Ingress found") {
    return;
  }
  document.getElementById("chaosReportCheckSiteURL").value = chosen;
}
function addElementToSelect(selectId, elementValue) {
var select = document.getElementById(selectId);
if (!select || !elementValue) {
return;
}
var option = document.createElement("option");
option.text = elementValue;
option.value = elementValue;
@@ -30,17 +36,38 @@ function addElementToSelect(selectId, elementValue) {
}
function parseIngressListJSON(ingressList) {
var hostOfIngress = convertStringToArrayWithSeparator(ingressList, ",")
if (hostOfIngress.length > 0) {
document.getElementById("chaosReportCheckSiteURL").value = hostOfIngress[0];
var select = document.getElementById("ingressHostList");
if (!select) {
return;
}
for (i in hostOfIngress) {
if (hostOfIngress[i] != "No Ingress found") {
addElementToSelect("ingressHostList", hostOfIngress[i]);
}
select.innerHTML = "";
var hostOfIngress = [];
if (Array.isArray(ingressList)) {
hostOfIngress = ingressList;
} else if (typeof ingressList === "string") {
hostOfIngress = convertStringToArrayWithSeparator(ingressList, ",");
}
hostOfIngress = hostOfIngress
.map(function (host) {
return String(host || "").trim();
})
.filter(function (host) {
return host !== "" && host !== "No Ingress found";
});
if (hostOfIngress.length === 0) {
addElementToSelect("ingressHostList", "No Ingress found");
return;
}
for (var i = 0; i < hostOfIngress.length; i++) {
addElementToSelect("ingressHostList", hostOfIngress[i]);
}
document.getElementById("chaosReportCheckSiteURL").value = hostOfIngress[0];
}
function resizeCharts() {
@@ -56,12 +83,42 @@ function resizeCharts() {
}
function getIngressLists() {
if (!namespace && configured_namespaces && configured_namespaces.length > 0) {
namespace = configured_namespaces[0];
}
if (!namespace) {
$('#alert_placeholder').replaceWith(alert_div + 'Set at least one namespace before configuring ingress checks.</div>');
return;
}
var oReq = new XMLHttpRequest();
oReq.onreadystatechange = function () {
if (this.readyState === XMLHttpRequest.DONE && this.status === 200) {
parseIngressListJSON(JSON.parse(this.responseText));
oReq.onload = function () {
if (this.status !== 200) {
$('#alert_placeholder').replaceWith(alert_div + 'Ingress lookup failed with status ' + this.status + ' on namespace ' + namespace + '.</div>');
parseIngressListJSON([]);
return;
}
};;
var ingressData = [];
try {
ingressData = JSON.parse(this.responseText);
} catch (e) {
ingressData = [];
}
parseIngressListJSON(ingressData);
if (!Array.isArray(ingressData) || ingressData.length === 0) {
$('#alert_placeholder').replaceWith(alert_div + 'No ingress hosts found in namespace ' + namespace + '. You can type a URL manually.</div>');
}
};
oReq.onerror = function () {
$('#alert_placeholder').replaceWith(alert_div + 'Ingress lookup failed due to network or CORS error. You can type a URL manually.</div>');
parseIngressListJSON([]);
};
var ingressUrl = appendK8sTargetParam(k8s_url + "/kube/ingresses?namespace=" + namespace);
oReq.open("GET", ingressUrl, true);
applyK8sConnectionHeaders(oReq);
@@ -94,7 +151,7 @@ function chaosReportHttpEndpointAdd() {
<div class="row">
<div class="col col-xl-10" style="margin-top: 2%;">
<label for="ingressHostList">Ingress Host List</label>
<select id="ingressHostList" class="form-select" aria-label="Ingress Host List" onclick="setModalState(true)">
<select id="ingressHostList" class="form-select" aria-label="Ingress Host List" onclick="setModalState(true)" onchange="setChaosReportURL(this)">
</select>
</div>
</div>

View File

@@ -161,9 +161,14 @@ function setCodeNameToTextInput(elementId) {
function getMetrics() {
var oReq = new XMLHttpRequest();
oReq.onload = function () {
if (this.status !== 200) {
console.warn('[METRICS] /metrics returned status ' + this.status);
return;
}
var lines = this.responseText.split('\n');
for (var i = 0;i < lines.length;i++){
metric = lines[i].split(' ');
var metric = lines[i].split(' ');
if (!metric[0] || metric[0] === '') continue;
if (metric[0] == "chaos_node_jobs_total") {
$('#chaos_jobs_total').text(metric[1]);
@@ -189,7 +194,7 @@ function getMetrics() {
$('#pods_match_regex').text(metric[1]);
}
else if (metric[0].match(chaos_job_regex)) {
metrics_split = metric[0].split(":");
var metrics_split = metric[0].split(":");
chaos_jobs_status.set(metrics_split[1] + ":" + metrics_split[2] + ":" + metrics_split[3], metric[1]);
}
else if (metric[0] == "current_chaos_job_pod") {
@@ -197,7 +202,10 @@ function getMetrics() {
$('#current_chaos_job_pod').text(metric[1]);
}
}
};;
};
oReq.onerror = function () {
console.error('[METRICS] XHR error fetching /metrics');
};
oReq.open("GET", k8s_url + "/metrics");
oReq.send();
}
@@ -205,16 +213,21 @@ function getMetrics() {
function getChaosJobsPodsPhase() {
var oReq = new XMLHttpRequest();
oReq.onload = function () {
if (this.status !== 200) return;
var lines = this.responseText.split('\n');
for (var i = 0;i < lines.length;i++){
metric = lines[i].split(' ');
var metric = lines[i].split(' ');
if (!metric[0] || metric[0] === '') continue;
if (metric[0].match(chaos_job_regex)) {
metrics_split = metric[0].split(":");
var metrics_split = metric[0].split(":");
chaos_jobs_status.set(metrics_split[1] + ":" + metrics_split[2] + ":" + metrics_split[3], metric[1]);
}
}
};;
};
oReq.onerror = function () {
console.error('[METRICS] XHR error fetching /chaos_jobs_pod_phase');
};
oReq.open("GET", k8s_url + "/chaos_jobs_pod_phase");
oReq.send();
}
@@ -461,8 +474,9 @@ function getPods() {
let new_pods = jsonData.items;
// Pod might just be killed in game, but not terminated in k8s yet.
// Only keep "killed" visual if K8s hasn't reported it as fully ready again.
for (i=0; i<new_pods.length; i++) {
if (aliens.some((alien) => alien.name == new_pods[i].name && alien.status == "killed")) {
if (new_pods[i].status !== "ready" && aliens.some((alien) => alien.name == new_pods[i].name && alien.status == "killed")) {
new_pods[i].status = "killed";
}
}

View File

@@ -332,27 +332,45 @@ server {
}
location /metrics {
default_type text/html;
default_type text/plain;
content_by_lua_block {
ngx.header['Access-Control-Allow-Origin'] = '*'
ngx.header['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
ngx.header['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
ngx.header['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range'
local redis = require "resty.redis"
local red = redis:new()
local okredis, errredis = red:connect("unix:/tmp/redis.sock")
for i, res in ipairs(red:keys("*total*")) do
if string.find(res, "chaos_node_jobs_total_on") then
local node = string.gsub(res, "chaos_node_jobs_total_on_", "")
local metric = "chaos_jobs_node_count{node=\"".. node .."\"}"
ngx.say(metric .. " " .. red:get(res))
if not okredis then
ngx.log(ngx.ERR, "[metrics] Redis connection failed: " .. tostring(errredis))
ngx.status = 500
ngx.say("Redis connection failed")
return
end
elseif string.find(res, "deleted_pods_total_on") then
local namespace = string.gsub(res, "deleted_pods_total_on_", "")
local metric = "deleted_namespace_pods_count{namespace=\"".. namespace .."\"}"
ngx.say(metric .. " " .. red:get(res))
local total_keys = red:keys("*total*")
if total_keys and type(total_keys) == "table" then
for i, res in ipairs(total_keys) do
if string.find(res, "chaos_node_jobs_total_on") then
local node = string.gsub(res, "chaos_node_jobs_total_on_", "")
local metric = "chaos_jobs_node_count{node=\"".. node .."\"}"
ngx.say(metric .. " " .. red:get(res))
elseif string.find(res, "deleted_pods_total_on") then
local namespace = string.gsub(res, "deleted_pods_total_on_", "")
local metric = "deleted_namespace_pods_count{namespace=\"".. namespace .."\"}"
ngx.say(metric .. " " .. red:get(res))
end
end
end
for i, res in ipairs(red:keys("pods_match_regex:*")) do
ngx.say(res .. " " .. red:get(res))
local regex_keys = red:keys("pods_match_regex:*")
if regex_keys and type(regex_keys) == "table" then
for i, res in ipairs(regex_keys) do
ngx.say(res .. " " .. red:get(res))
end
end
local metrics = {
@@ -377,20 +395,39 @@ server {
end
end
for i, res in ipairs(red:keys("chaos_jobs_status*")) do
ngx.say(res .. " " .. red:get(res))
local status_keys = red:keys("chaos_jobs_status*")
if status_keys and type(status_keys) == "table" then
for i, res in ipairs(status_keys) do
ngx.say(res .. " " .. red:get(res))
end
end
}
}
location /chaos_jobs_pod_phase {
default_type text/html;
default_type text/plain;
content_by_lua_block {
ngx.header['Access-Control-Allow-Origin'] = '*'
ngx.header['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
ngx.header['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
ngx.header['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range'
local redis = require "resty.redis"
local red = redis:new()
local okredis, errredis = red:connect("unix:/tmp/redis.sock")
for i, res in ipairs(red:keys("chaos_jobs_pod_phase*")) do
ngx.say(res .. " " .. red:get(res))
if not okredis then
ngx.log(ngx.ERR, "[chaos_jobs_pod_phase] Redis connection failed: " .. tostring(errredis))
ngx.status = 500
ngx.say("Redis connection failed")
return
end
local phase_keys = red:keys("chaos_jobs_pod_phase*")
if phase_keys and type(phase_keys) == "table" then
for i, res in ipairs(phase_keys) do
ngx.say(res .. " " .. red:get(res))
end
end
}
}
@@ -740,10 +777,6 @@ server {
local lyaml = require "lyaml"
local json = require 'lunajson'
math.randomseed(os.clock()*100000000000)
local rand = math.random(999, 9999)
local file_name = "/tmp/chaosprogram" .. rand
ngx.header['Access-Control-Allow-Origin'] = '*'
ngx.header['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
@@ -751,29 +784,8 @@ server {
ngx.header['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range';
ngx.req.read_body()
local data = ngx.req.get_body_data()
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] Chaos program payload sent from client: " .. data)
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] Write temp file: " .. file_name)
local yamlfile = io.open(file_name, "w")
yamlfile:write(data)
yamlfile:close()
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] Checking if " .. file_name .. " is a valid YAML file")
local handle = io.popen("python3 -c 'import yaml, sys; print(yaml.safe_load(sys.stdin))' < " .. file_name .. " ; echo $? > " .. file_name .. ".check")
local result = handle:read("*a")
handle = io.popen("cat " .. file_name .. ".check")
result = handle:read("*a")
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] Exit code for checking YAML syntax of " .. file_name .. " is " .. result)
if result == "0\n" then
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] YAML Syntax is OK")
else
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] YAML Syntax is NOT OK")
handle = io.popen("rm -f " .. file_name .. ".check")
handle:read("*a")
ngx.status = 400
ngx.say("Invalid YAML Chaos Program")
if data then
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] Chaos program payload sent from client: " .. data)
end
if data == nil then
@@ -781,8 +793,12 @@ server {
ngx.status = 400
ngx.say(error)
else
local yaml_data = lyaml.load(data)
local parse_ok, yaml_data = pcall(lyaml.load, data)
if not parse_ok or type(yaml_data) ~= "table" then
ngx.status = 400
ngx.say("Invalid YAML Chaos Program")
return
end
if not key_exists(yaml_data, "k8s_jobs") then
error = "[PROGRAMMING_MODE] Chaos program does not contain 'jobs' key."
@@ -805,7 +821,6 @@ server {
ngx.say(error)
else
os.remove(file_name)
local response = json.encode(yaml_data)
ngx.log(ngx.INFO, response)
ngx.status = 200

View File

@@ -58,7 +58,21 @@ end
k8s_url = string.gsub(k8s_url, "/+$", "")
local token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"] or tostring(os.getenv("TOKEN") or "")
local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local token = ""
if header_token and header_token ~= "" then
token = header_token
else
token = tostring(os.getenv("TOKEN") or "")
end
if token == "" then
local f = io.open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r")
if f then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
end
end
if token == "" then
ngx.status = 500
ngx.say("Missing Kubernetes API token configuration.")

View File

@@ -37,6 +37,15 @@ else
k8s_url = endpoint or ""
end
local req_headers = ngx.req.get_headers()
local target = arg["target"] or req_headers["x-k8s-target"] or req_headers["X-K8S-Target"]
if target and target ~= "" then
if not string.match(target, "^https?://") then
target = "https://" .. target
end
k8s_url = string.gsub(target, "/+$", "")
end
if k8s_url == "" then
ngx.status = 500
ngx.say("Missing Kubernetes endpoint configuration. Set KUBERNETES_SERVICE_HOST or ENDPOINT.")
@@ -49,16 +58,24 @@ end
k8s_url = string.gsub(k8s_url, "/+$", "")
local req_headers = ngx.req.get_headers()
local target = arg["target"] or req_headers["x-k8s-target"] or req_headers["X-K8S-Target"]
if target and target ~= "" then
if not string.match(target, "^https?://") then
target = "https://" .. target
end
k8s_url = string.gsub(target, "/+$", "")
local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local token = ""
if header_token and header_token ~= "" then
token = header_token
else
token = tostring(os.getenv("TOKEN") or "")
end
if token == "" then
local f = io.open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r")
if f then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
end
end
local token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"] or tostring(os.getenv("TOKEN") or "")
if token == "" then
ngx.status = 500
ngx.say("Missing Kubernetes API token configuration.")

View File

@@ -23,7 +23,21 @@ local disable_tls = disable_tls_env == "true" or disable_tls_env == "1" or disab
local arg = ngx.req.get_uri_args()
local req_headers = ngx.req.get_headers()
local target = arg['target'] or req_headers["x-k8s-target"] or req_headers["X-K8S-Target"]
local token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"] or tostring(os.getenv("TOKEN") or "")
local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local token = ""
if header_token and header_token ~= "" then
token = header_token
else
token = tostring(os.getenv("TOKEN") or "")
end
if token == "" then
local f = io.open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r")
if f then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
end
end
local ca_cert_b64 = req_headers["x-k8s-ca-cert-b64"] or req_headers["X-K8S-CA-CERT-B64"]
local ca_cert = nil
if ca_cert_b64 and ca_cert_b64 ~= "" then

View File

@@ -4,29 +4,19 @@ local json = require 'lunajson'
local redis = require "resty.redis"
local incr = 0
ngx.log(ngx.INFO, "[pod.lua] === Request started ===")
local k8s_url = ""
local kube_host = os.getenv("KUBERNETES_SERVICE_HOST")
local kube_port = os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")
local endpoint = os.getenv("ENDPOINT")
ngx.log(ngx.INFO, "[pod.lua] ENV KUBERNETES_SERVICE_HOST=" .. tostring(kube_host))
ngx.log(ngx.INFO, "[pod.lua] ENV KUBERNETES_SERVICE_PORT_HTTPS=" .. tostring(kube_port))
ngx.log(ngx.INFO, "[pod.lua] ENV ENDPOINT=" .. tostring(endpoint))
ngx.log(ngx.INFO, "[pod.lua] ENV TOKEN present=" .. tostring(os.getenv("TOKEN") ~= nil and os.getenv("TOKEN") ~= ""))
ngx.log(ngx.INFO, "[pod.lua] ENV DISABLE_TLS=" .. tostring(os.getenv("DISABLE_TLS")))
if kube_host and kube_host ~= "" then
local port_suffix = ""
if kube_port and kube_port ~= "" then
port_suffix = ":" .. kube_port
end
k8s_url = "https://" .. kube_host .. port_suffix
ngx.log(ngx.INFO, "[pod.lua] k8s_url from KUBERNETES_SERVICE_HOST=" .. k8s_url)
else
k8s_url = endpoint or ""
ngx.log(ngx.INFO, "[pod.lua] k8s_url from ENDPOINT=" .. k8s_url)
end
local token = tostring(os.getenv("TOKEN") or "")
if token == "" then
@@ -35,10 +25,8 @@ if token == "" then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
ngx.log(ngx.INFO, "[pod.lua] Token loaded from SA file, length=" .. tostring(#token))
end
end
ngx.log(ngx.INFO, "[pod.lua] Initial token length=" .. tostring(#token))
local disable_tls_env = string.lower(tostring(os.getenv("DISABLE_TLS") or "false"))
local disable_tls = disable_tls_env == "true" or disable_tls_env == "1" or disable_tls_env == "yes"
@@ -49,24 +37,12 @@ local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local ca_cert_b64 = req_headers["x-k8s-ca-cert-b64"] or req_headers["X-K8S-CA-CERT-B64"]
local ca_cert = nil
ngx.log(ngx.INFO, "[pod.lua] Query arg target=" .. tostring(arg['target']))
ngx.log(ngx.INFO, "[pod.lua] Header x-k8s-target=" .. tostring(req_headers["x-k8s-target"]))
ngx.log(ngx.INFO, "[pod.lua] Resolved target=" .. tostring(target))
ngx.log(ngx.INFO, "[pod.lua] Header x-k8s-token present=" .. tostring(header_token ~= nil and header_token ~= ""))
ngx.log(ngx.INFO, "[pod.lua] Header x-k8s-ca-cert-b64 present=" .. tostring(ca_cert_b64 ~= nil and ca_cert_b64 ~= ""))
ngx.log(ngx.INFO, "[pod.lua] Query arg namespace=" .. tostring(arg['namespace']))
ngx.log(ngx.INFO, "[pod.lua] Query arg action=" .. tostring(arg['action']))
if ca_cert_b64 and ca_cert_b64 ~= "" then
ca_cert = ngx.decode_base64(ca_cert_b64)
ngx.log(ngx.INFO, "[pod.lua] CA cert decoded, length=" .. tostring(#ca_cert))
end
if header_token and header_token ~= "" then
token = header_token
ngx.log(ngx.INFO, "[pod.lua] Token overridden from header, new length=" .. tostring(#token))
else
ngx.log(ngx.INFO, "[pod.lua] No token override from header, keeping env token (length=" .. tostring(#token) .. ")")
end
local namespace = arg['namespace']
@@ -82,13 +58,8 @@ if target and target ~= "" then
target = "https://" .. target
end
k8s_url = string.gsub(target, "/+$", "")
ngx.log(ngx.INFO, "[pod.lua] k8s_url overridden from target=" .. k8s_url)
end
ngx.log(ngx.INFO, "[pod.lua] Final k8s_url=" .. k8s_url)
ngx.log(ngx.INFO, "[pod.lua] Final token length=" .. tostring(#token))
ngx.log(ngx.INFO, "[pod.lua] disable_tls=" .. tostring(disable_tls))
if k8s_url == "" then
ngx.log(ngx.ERR, "[pod.lua] FAIL: k8s_url is empty")
ngx.status = 500
@@ -126,21 +97,17 @@ if action == "delete" then
local okredis, errredis = red:connect("unix:/tmp/redis.sock")
if okredis then
ngx.log(ngx.INFO, "Connection to Redis is ok")
else
ngx.log(ngx.INFO, "Connection to Redis is not ok")
ngx.log(ngx.INFO, errredis)
ngx.log(ngx.ERR, "[pod.lua] Redis connection failed: " .. tostring(errredis))
end
local res, err = red:get("deleted_pods_total")
if res == ngx.null then
ngx.say(err)
ngx.log(ngx.INFO, "deleted_pods_total is not present on Redis. Creating it..")
red:set("deleted_pods_total", 1)
else
incr = res + 1
ngx.log(ngx.INFO, "deleted_pods_total is present on Redis. Incrementing it..")
red:set("deleted_pods_total", incr)
end
@@ -157,13 +124,11 @@ end
if action == "list" then
url = k8s_url.. "/api/v1/namespaces/" .. namespace .. "/pods"
ngx.log(ngx.INFO, "[pod.lua] Action=list, URL=" .. url)
elseif action == "delete" then
local pod_name = arg['pod_name']
url = k8s_url.. "/api/v1/namespaces/" .. namespace .. "/pods/" .. pod_name
method = "DELETE"
ngx.log(ngx.INFO, "[pod.lua] Action=delete, pod=" .. tostring(pod_name) .. ", URL=" .. url)
else
ngx.log(ngx.ERR, "[pod.lua] FAIL: invalid action=" .. tostring(action))
@@ -194,55 +159,55 @@ if not disable_tls and ca_cert and ca_cert ~= "" then
ca_file:write(ca_cert)
ca_file:close()
request_opts.cafile = ca_file_path
ngx.log(ngx.INFO, "[pod.lua] Custom CA cert written to " .. ca_file_path)
else
ngx.log(ngx.ERR, "[pod.lua] Failed to write CA cert to " .. ca_file_path)
end
end
ngx.log(ngx.INFO, "[pod.lua] Sending " .. method .. " request to " .. url .. " verify=" .. tostring(request_opts.verify) .. " cafile=" .. tostring(request_opts.cafile))
local ok, statusCode, headers, statusText = https.request(request_opts)
ngx.log(ngx.INFO, "[pod.lua] Response: ok=" .. tostring(ok) .. " statusCode=" .. tostring(statusCode) .. " statusText=" .. tostring(statusText))
ngx.log(ngx.INFO, "[pod.lua] Response body length=" .. tostring(#table.concat(resp)))
if action == "list" then
local i = 1
local j = 0
pods["items"] = {}
local resp_body = table.concat(resp)
ngx.log(ngx.INFO, "[pod.lua] Decoding JSON response for list action, body preview=" .. string.sub(resp_body, 1, 200))
local decode_ok, decode_err = pcall(function() decoded = json.decode(resp_body) end)
if not decode_ok then
ngx.log(ngx.ERR, "[pod.lua] JSON decode failed: " .. tostring(decode_err))
ngx.say("{\"items\": []}")
return
end
ngx.log(ngx.INFO, "[pod.lua] Decoded kind=" .. tostring(decoded["kind"]) .. " items count=" .. tostring(decoded["items"] and #decoded["items"] or "nil"))
if decoded["kind"] == "PodList" then
for k2,v2 in ipairs(decoded["items"]) do
if v2["status"]["phase"] == "Running" and v2["metadata"]["labels"]["chaos-controller"] ~= "kubeinvaders" then
-- ngx.log(ngx.INFO, "found pod " .. v2["metadata"]["name"])
local status = "pending"
for _, c in ipairs(v2["status"]["conditions"]) do
if c["type"] == "ContainersReady" and c["status"] == "True" then
status = "ready"
break
local metadata = v2["metadata"] or {}
local labels = metadata["labels"] or {}
local pod_name = metadata["name"] or ""
local pod_status = v2["status"] or {}
local phase = pod_status["phase"] or ""
if phase == "Running" and labels["chaos-controller"] ~= "kubeinvaders" then
local status = "ready"
local conditions = pod_status["conditions"]
if type(conditions) == "table" then
for _, c in ipairs(conditions) do
if (c["type"] == "ContainersReady" or c["type"] == "Ready") and c["status"] == "False" then
status = "pending"
break
end
end
end
pods["items"][i] = { name = v2["metadata"]["name"], status = status }
pods["items"][i] = { name = pod_name, status = status }
i = i + 1
pods_not_found = false;
elseif v2["status"]["phase"] == "ContainerCreating" and v2["metadata"]["labels"]["chaos-controller"] ~= "kubeinvaders" then
-- ngx.log(ngx.INFO, "found pod " .. v2["metadata"]["name"])
pods["items"][i] = { name = v2["metadata"]["name"], status = "pending" }
elseif phase == "ContainerCreating" and labels["chaos-controller"] ~= "kubeinvaders" then
pods["items"][i] = { name = pod_name, status = "pending" }
i = i + 1
pods_not_found = false;
elseif v2["status"]["phase"] == "Terminating" and v2["metadata"]["labels"]["chaos-controller"] ~= "kubeinvaders" then
-- ngx.log(ngx.INFO, "found pod " .. v2["metadata"]["name"])
pods["items"][i] = { name = v2["metadata"]["name"], status = "killed" }
elseif phase == "Terminating" and labels["chaos-controller"] ~= "kubeinvaders" then
pods["items"][i] = { name = pod_name, status = "killed" }
i = i + 1
pods_not_found = false;
elseif v2["status"]["phase"] ~= "Running" and v2["status"]["phase"] ~= "Completed" and v2["status"]["phase"] ~= "Succeeded" then
elseif phase ~= "Running" and phase ~= "Completed" and phase ~= "Succeeded" then
j = j + 1
end
end
@@ -285,18 +250,13 @@ if action == "list" then
end
if pods_not_found then
ngx.log(ngx.INFO, "[pod.lua] No pods found in namespace " .. namespace)
ngx.say("{\"items\": []}")
else
local encoded = json.encode(pods)
ngx.log(ngx.INFO, "[pod.lua] Returning " .. tostring(i - 1) .. " pods for namespace " .. namespace)
ngx.say(encoded)
end
elseif action == "delete" then
local delete_resp = table.concat(resp)
ngx.log(ngx.INFO, "[pod.lua] Delete response: " .. string.sub(delete_resp, 1, 300))
ngx.say(delete_resp)
end
ngx.log(ngx.INFO, "[pod.lua] === Request finished ===")

View File

@@ -8,9 +8,24 @@ local k8s_url = ""
local kube_host = os.getenv("KUBERNETES_SERVICE_HOST")
local kube_port = os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")
local endpoint = os.getenv("ENDPOINT")
local arg = ngx.req.get_uri_args()
local req_headers = ngx.req.get_headers()
local target = arg["target"] or req_headers["x-k8s-target"] or req_headers["X-K8S-Target"]
local token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"] or tostring(os.getenv("TOKEN") or "")
local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local token = ""
if header_token and header_token ~= "" then
token = header_token
else
token = tostring(os.getenv("TOKEN") or "")
end
if token == "" then
local f = io.open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r")
if f then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
end
end
local ca_cert_b64 = req_headers["x-k8s-ca-cert-b64"] or req_headers["X-K8S-CA-CERT-B64"]
if kube_host and kube_host ~= "" then
@@ -62,7 +77,6 @@ ngx.header['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If
ngx.header['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range';
ngx.req.read_body()
local arg = ngx.req.get_uri_args()
local body_data = ngx.req.get_body_data()
ngx.log(ngx.INFO, "[PROGRAMMING_MODE] Payload sent by client: " .. body_data)

View File

@@ -41,7 +41,21 @@ end
k8s_url = string.gsub(k8s_url, "/+$", "")
local token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"] or tostring(os.getenv("TOKEN") or "")
local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local token = ""
if header_token and header_token ~= "" then
token = header_token
else
token = tostring(os.getenv("TOKEN") or "")
end
if token == "" then
local f = io.open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r")
if f then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
end
end
if token == "" then
ngx.status = 500
ngx.say("Missing Kubernetes API token configuration.")

View File

@@ -41,7 +41,21 @@ end
k8s_url = string.gsub(k8s_url, "/+$", "")
local token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"] or tostring(os.getenv("TOKEN") or "")
local header_token = req_headers["x-k8s-token"] or req_headers["X-K8S-Token"]
local token = ""
if header_token and header_token ~= "" then
token = header_token
else
token = tostring(os.getenv("TOKEN") or "")
end
if token == "" then
local f = io.open("/var/run/secrets/kubernetes.io/serviceaccount/token", "r")
if f then
token = f:read("*a") or ""
token = token:gsub("%s+$", "")
f:close()
end
end
if token == "" then
ngx.status = 500
ngx.say("Missing Kubernetes API token configuration.")