mirror of
https://github.com/lucky-sideburn/kubeinvaders.git
synced 2026-02-14 17:50:00 +00:00
added new metrics for current chaos job pod
This commit is contained in:
@@ -41,7 +41,6 @@ RUN sed -i.bak 's/listen\(.*\)80;/listen 8081;/' /etc/nginx/conf.d/default.conf
|
||||
RUN mkdir /usr/local/openresty/nginx/conf/kubeinvaders
|
||||
|
||||
COPY nginx/nginx.conf /etc/nginx/nginx.conf
|
||||
|
||||
COPY scripts/metrics.lua /usr/local/openresty/nginx/conf/kubeinvaders/metrics.lua
|
||||
COPY scripts/pod.lua /usr/local/openresty/nginx/conf/kubeinvaders/pod.lua
|
||||
COPY scripts/node.lua /usr/local/openresty/nginx/conf/kubeinvaders/node.lua
|
||||
@@ -51,7 +50,11 @@ COPY scripts/chaos-containers.lua /usr/local/openresty/nginx/conf/kubeinvaders/c
|
||||
COPY scripts/programming_mode.lua /usr/local/openresty/nginx/conf/kubeinvaders/programming_mode.lua
|
||||
COPY scripts/config_kubeinv.lua /usr/local/openresty/lualib/config_kubeinv.lua
|
||||
COPY scripts/programming_mode /opt/programming_mode/
|
||||
COPY scripts/metrics_loop /opt/metrics_loop/
|
||||
|
||||
RUN pip3 install -r /opt/programming_mode/requirements.txt
|
||||
RUN pip3 install -r /opt/programming_mode/requirements.txt
|
||||
|
||||
COPY nginx/KubeInvaders.conf /etc/nginx/conf.d/KubeInvaders.conf
|
||||
RUN chmod g+rwx /var/cache/nginx /var/run /var/log/nginx /var/www/html /etc/nginx/conf.d
|
||||
|
||||
|
||||
@@ -10,4 +10,7 @@ fi
|
||||
# TODO: use a sidecar
|
||||
redis-server /etc/redis/redis.conf &
|
||||
|
||||
# TODO: use a sidecar
|
||||
bash /opt/metrics_loop/start.sh &
|
||||
|
||||
nginx -c /etc/nginx/nginx.conf -g 'daemon off;'
|
||||
|
||||
@@ -12,12 +12,12 @@ metadata:
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "pods/log"]
|
||||
verbs: ["get", "watch", "list", "delete"]
|
||||
verbs: ["delete"]
|
||||
- apiGroups: ["batch", "extensions"]
|
||||
resources: ["jobs"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
- apiGroups: ["*"]
|
||||
resources: ["*"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- end -}}
|
||||
{{- if or .Values.clusterRole.create .Values.clusterRole.name }}
|
||||
|
||||
@@ -44,7 +44,7 @@
|
||||
<div class="modal-dialog" role="document">
|
||||
<div class="modal-content">
|
||||
<div class="modal-body">
|
||||
<pre id="currentChaosContainrYaml"></pre>
|
||||
<pre id="currentChaosContainerYaml"></pre>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-light" data-dismiss="modal" onclick="closeCurrentChaosJobModal()">Close</button>
|
||||
@@ -169,7 +169,7 @@
|
||||
<div style="margin-top: 1%;">
|
||||
<span style="color: #000000;">Deleted Pod Total: <span id="deleted_pods_total" style="color: #8f00bb">0</span> - </span>
|
||||
<span style="color: #000000">Chaos Jobs Total: <span id="chaos_jobs_total" style="color: #8f00bb">0</span> - </span>
|
||||
<span style="color: #000000">Current Chaos Jobs: <span id="chaos_jobs_current" style="color: #8f00bb">0</span> - </span>
|
||||
<span style="color: #000000">Current Chaos Jobs Pods: <span id="current_chaos_job_pod" style="color: #8f00bb">0</span> - </span>
|
||||
<span style="color: #000000">Not Running Pods: <span id="pods_not_running_on" style="color: #8f00bb">0</span> - </span>
|
||||
<span style="color: #000000">Current Replicas State Delay: </span><span id="fewer_replicas_seconds" style="color: #8f00bb">0</span><font color="#4f4f4f"> sec</font> - </span>
|
||||
<span style="color: #000000">Latest Replicas State Delay: <span id="latest_fewer_replicas_seconds" style="color: #8f00bb">0</span><font color="#4f4f4f"> sec</font> </span>
|
||||
|
||||
@@ -132,8 +132,8 @@ function getMetrics() {
|
||||
else if (metric[0] == "pods_not_running_on_selected_ns") {
|
||||
$('#pods_not_running_on').text(metric[1]);
|
||||
}
|
||||
else if (metric[0] == "chaos_jobs_current") {
|
||||
$('#chaos_jobs_current').text(metric[1]);
|
||||
else if (metric[0] == "current_chaos_job_pod") {
|
||||
$('#current_chaos_job_pod').text(metric[1]);
|
||||
}
|
||||
}
|
||||
};;
|
||||
@@ -178,7 +178,7 @@ function getCurrentChaosContainer() {
|
||||
oReq.onload = function () {
|
||||
//console.log(this.responseText);
|
||||
job_parsed = JSON.stringify(JSON.parse(this.responseText), null, 4);
|
||||
$('#currentChaosContainrYaml').text(job_parsed);
|
||||
$('#currentChaosContainerYaml').text(job_parsed);
|
||||
$('#currentChaosContainerJsonTextArea').val(job_parsed);
|
||||
};;
|
||||
oReq.open("GET", "https://" + clu_endpoint + "/kube/chaos/containers?action=container_definition");
|
||||
|
||||
53
scripts/metrics_loop/experiments.yaml
Normal file
53
scripts/metrics_loop/experiments.yaml
Normal file
@@ -0,0 +1,53 @@
|
||||
jobs:
|
||||
# cpu-attack:
|
||||
# image: docker.io/luckysideburn/kubeinvaders-stress-ng:latest
|
||||
# command: "stress-ng"
|
||||
# args:
|
||||
# - --cpu
|
||||
# - 4
|
||||
# - --io
|
||||
# - 2
|
||||
# - --vm
|
||||
# - 1
|
||||
# - --vm-bytes
|
||||
# - 1G
|
||||
# - --timeout
|
||||
# - 10s
|
||||
# - --metrics-brief
|
||||
|
||||
# mem-attack:
|
||||
# image: docker.io/luckysideburn/kubeinvaders-stress-ng:latest
|
||||
# command: "stress-ng"
|
||||
# args:
|
||||
# - --vm
|
||||
# - 2
|
||||
# - --vm-bytes
|
||||
# - 1G
|
||||
|
||||
cpu-attack:
|
||||
image: docker.io/luckysideburn/kubeinvaders-stress-ng:latest
|
||||
command: "stress-ng"
|
||||
args:
|
||||
- --help
|
||||
|
||||
mem-attack:
|
||||
image: docker.io/luckysideburn/kubeinvaders-stress-ng:latest
|
||||
command: "stress-ng"
|
||||
args:
|
||||
- --help
|
||||
|
||||
experiments:
|
||||
- name: cpu-attack-exp
|
||||
job: cpu-attack
|
||||
loop: 5
|
||||
after:
|
||||
check_url: https://google.it
|
||||
check_payload: ''
|
||||
check_tls_ignore: true
|
||||
- name: mem-attack-exp
|
||||
job: mem-attack
|
||||
loop: 5
|
||||
after:
|
||||
check_url: https://google.it
|
||||
check_payload: ''
|
||||
check_tls_ignore: true
|
||||
4
scripts/metrics_loop/requirements.txt
Normal file
4
scripts/metrics_loop/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
redis
|
||||
pyyaml
|
||||
requests
|
||||
kubernetes
|
||||
87
scripts/metrics_loop/start.py
Normal file
87
scripts/metrics_loop/start.py
Normal file
@@ -0,0 +1,87 @@
|
||||
from asyncio.log import logger
|
||||
import yaml
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from kubernetes import client, config
|
||||
from kubernetes.client.rest import ApiException
|
||||
import requests
|
||||
from string import Template
|
||||
import string
|
||||
import random
|
||||
import redis
|
||||
import time
|
||||
|
||||
def create_container(image, name, command, args):
    """Build a V1Container spec for a chaos-job pod.

    :param image: container image reference to run
    :param name: name assigned to the container
    :param command: entrypoint command list/string for the container
    :param args: arguments passed to the command
    :return: a configured ``client.V1Container``
    """
    spec = client.V1Container(
        name=name,
        image=image,
        command=command,
        args=args,
        image_pull_policy='IfNotPresent',
    )

    logging.info(
        f"Created container with name: {spec.name}, "
        f"image: {spec.image} and args: {spec.args}"
    )

    return spec
|
||||
|
||||
def create_pod_template(pod_name, container):
    """Wrap a container in a never-restarting pod template labelled as a chaos node.

    :param pod_name: name (and ``pod_name`` label) for the pod
    :param container: the ``V1Container`` to embed in the pod spec
    :return: a ``client.V1PodTemplateSpec``
    """
    labels = {"pod_name": pod_name, "approle": "chaosnode"}
    meta = client.V1ObjectMeta(name=pod_name, labels=labels)
    pod_spec = client.V1PodSpec(restart_policy="Never", containers=[container])

    return client.V1PodTemplateSpec(metadata=meta, spec=pod_spec)
|
||||
|
||||
def create_job(job_name, pod_template):
    """Assemble a batch/v1 Job around the given pod template.

    :param job_name: name (and ``job_name`` label) for the Job
    :param pod_template: the ``V1PodTemplateSpec`` the Job runs
    :return: a ``client.V1Job``
    """
    job_labels = {"job_name": job_name, "approle": "chaosnode"}
    meta = client.V1ObjectMeta(name=job_name, labels=job_labels)

    return client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=meta,
        # backoff_limit=0: a failed chaos pod is not retried.
        spec=client.V1JobSpec(backoff_limit=0, template=pod_template),
    )
|
||||
|
||||
|
||||
# Metric counters are published to Redis over the local unix socket; the
# nginx/lua metrics endpoint reads them from there.
r = redis.Redis(unix_socket_path='/tmp/redis.sock')

# create logger
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logging.info('Starting script for KubeInvaders programming mode')

# Build the Kubernetes client configuration: bearer token from the TOKEN
# environment variable, API server address from the first CLI argument.
configuration = client.Configuration()
token = os.environ["TOKEN"]
configuration.api_key = {"authorization": f"Bearer {token}"}
configuration.host = sys.argv[1]

# The in-cluster API endpoint uses a certificate we do not verify here.
configuration.insecure_skip_tls_verify = True
configuration.verify_ssl = False

client.Configuration.set_default(configuration)

api_instance = client.CoreV1Api()
batch_api = client.BatchV1Api()
namespace = "kubeinvaders"

# Poll once per second: count chaos-job pods that are Pending or Running and
# publish the count under "current_chaos_job_pod".
while True:
    try:
        api_response = api_instance.list_namespaced_pod(namespace=namespace)
        #logging.info(api_response)
    except ApiException as e:
        # Keep polling on transient API errors; skip this iteration so we
        # never touch an unset api_response below.
        logging.info(e)
        time.sleep(1)
        continue

    # Reset the gauge before recounting this cycle.
    r.set("current_chaos_job_pod", 0)

    for pod in api_response.items:
        if pod.metadata.labels.get('approle') == 'chaosnode':
            logging.info(pod.status.phase)
            if pod.status.phase in ("Pending", "Running"):
                r.incr('current_chaos_job_pod')
    time.sleep(1)
|
||||
11
scripts/metrics_loop/start.sh
Executable file
11
scripts/metrics_loop/start.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/sh
# Launch the metrics loop against the in-cluster Kubernetes API.

# Prefer an operator-supplied K8S_TOKEN; otherwise read the mounted
# service-account token.
if [ -n "$K8S_TOKEN" ]; then
  echo 'Found K8S_TOKEN... using K8S_TOKEN instead of TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)'
  export TOKEN="$K8S_TOKEN"
else
  # Source the service account token from the container directly.
  export TOKEN="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
fi

# Background the loop so the caller's entrypoint keeps running.
python3 /opt/metrics_loop/start.py https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS} &
|
||||
6
scripts/metrics_loop/start_dev.sh
Normal file
6
scripts/metrics_loop/start_dev.sh
Normal file
@@ -0,0 +1,6 @@
|
||||
#! /bin/bash
# Dev helper: extract the kubeinvaders service-account token via kubectl and
# run the metrics loop against the API server on this host's eth0 address.

secret="$(kubectl get secret -n kubeinvaders | grep 'service-account-token' | grep kubeinvaders | awk '{ print $1}')"
token="$(kubectl describe secret "$secret" -n kubeinvaders | grep 'token:' | awk '{ print $2}')"
ip="$(ip address show eth0 | grep inet | grep -v inet6 | awk '{ print $2 }' | awk -F/ '{ print $1 }')"

export TOKEN="$token"
python3 start.py "https://$ip:6443"
|
||||
@@ -30,7 +30,7 @@ def create_container(image, name, command, args):
|
||||
def create_pod_template(pod_name, container):
|
||||
pod_template = client.V1PodTemplateSpec(
|
||||
spec=client.V1PodSpec(restart_policy="Never", containers=[container]),
|
||||
metadata=client.V1ObjectMeta(name=pod_name, labels={"pod_name": pod_name, "approle": "chaosnode"}),
|
||||
metadata=client.V1ObjectMeta(name=pod_name, labels={"app": "kubeinvaders", "pod_name": pod_name, "approle": "chaosnode"}),
|
||||
)
|
||||
|
||||
return pod_template
|
||||
|
||||
7
scripts/programming_mode/start_dev.sh
Normal file
7
scripts/programming_mode/start_dev.sh
Normal file
@@ -0,0 +1,7 @@
|
||||
#! /bin/bash
# Dev helper for programming mode: extract the service-account token via
# kubectl and run the script against the API server on this host's eth0
# address.

secret=$(kubectl get secret -n kubeinvaders | grep 'service-account-token' | awk '{ print $1}')
token=$(kubectl describe secret $secret -n kubeinvaders | grep 'token:' | awk '{ print $2}')
ip=$(ip address show eth0 | grep inet | grep -v inet6 | awk '{ print $2 }' | awk -F/ '{ print $1 }')
export TOKEN=$token
# kube-apiserver on 6443 serves TLS only; plain http:// is refused, so use
# https:// (matching scripts/metrics_loop/start_dev.sh).
python3 start.py https://$ip:6443
|
||||
Reference in New Issue
Block a user