Refactor Labs

This commit is contained in:
Nishan
2025-04-05 15:20:55 +05:30
parent d8b69865f6
commit 41c4b277aa
223 changed files with 8468 additions and 1620 deletions

21 LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -10,16 +10,26 @@ A powerful Kubernetes certification practice environment that provides a realist
- **Docker-based deployment** for easy setup and consistent environment
- **Timed exam mode** with real exam-like conditions and countdown timer
## Demo Video
Watch our live demo video showcasing the CK-X Simulator in action:
[![CK-X Simulator Demo](https://img.youtube.com/vi/EQVGhF8x7R4/0.jpg)](https://www.youtube.com/watch?v=EQVGhF8x7R4&ab_channel=NishanB)
Click the image above or [this link](https://www.youtube.com/watch?v=EQVGhF8x7R4&ab_channel=NishanB) to see the full demo.
## Installation
#### Linux & macOS (tested on Apple M1)
```bash
bash <(curl -fsSL https://raw.githubusercontent.com/nishanb/ck-x/master/scripts/install.sh)
```
#### Windows (currently not supported: installation is unstable and may break during setup)
```powershell
irm https://raw.githubusercontent.com/nishanb/ck-x/master/scripts/install.ps1 | iex
```
### Manual Installation
@@ -48,7 +58,7 @@ If you find CK-X Simulator helpful, consider [buying me a coffee](https://buymea
## Disclaimer
CK-X is an independent tool, not affiliated with CNCF, Linux Foundation, or PSI. We do not guarantee exam success. Please read our [Privacy Policy](docs/PRIVACY_POLICY.md) and [Terms of Service](docs/TERMS_OF_SERVICE.md) for more details about data collection, usage, and limitations.
## Acknowledgments

View File

@@ -41,7 +41,7 @@
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="https://github.com/nishanb/ck-x" target="_blank">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="bi bi-github" viewBox="0 0 16 16">
<path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.012 8.012 0 0 0 16 8c0-4.42-3.58-8-8-8z"/>
</svg>
@@ -195,6 +195,7 @@
<option value="CKAD" selected>CKAD - Certified Kubernetes Application Developer</option> <option value="CKAD" selected>CKAD - Certified Kubernetes Application Developer</option>
<option value="CKA">CKA - Certified Kubernetes Administrator</option> <option value="CKA">CKA - Certified Kubernetes Administrator</option>
<option value="CKS">CKS - Certified Kubernetes Security Specialist</option> <option value="CKS">CKS - Certified Kubernetes Security Specialist</option>
<option value="Other">Other</option>
</select> </select>
</div> </div>
<div class="mb-3"> <div class="mb-3">
@@ -203,16 +204,21 @@
<option value="">Select an exam</option> <option value="">Select an exam</option>
</select> </select>
</div> </div>
<div class="mb-4"> <div class="mb-2">
<div class="alert alert-info" id="examDescription"> <div class="alert alert-info" id="examDescription">
Select an exam to see its description. Select an exam to see its description.
</div> </div>
</div> </div>
</form> </form>
</div> </div>
<div class="modal-footer"> <div class="modal-footer flex-column align-items-stretch py-1">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">CANCEL</button> <div class="d-flex justify-content-end gap-2 mb-1">
<button type="button" class="btn btn-primary" id="startSelectedExam" disabled>START EXAM</button> <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">CANCEL</button>
<button type="button" class="btn btn-primary" id="startSelectedExam" disabled>START EXAM</button>
</div>
<div class="text-muted text-center w-100" style="font-size: 0.65rem;">
By clicking on Start exam, you accept our <a href="https://github.com/nishanb/CK-X/blob/master/docs/PRIVACY_POLICY.md" target="_blank">Privacy Policy</a>
</div>
</div> </div>
</div> </div>
</div> </div>

View File

@@ -38,7 +38,7 @@ document.addEventListener('DOMContentLoaded', function() {
currentExamBtn.addEventListener('click', () => {
if (currentExamId) {
window.open(`/exam.html?id=${currentExamId}`, '_blank');
} else {
showError('No exam ID available for redirection.');
}
@@ -46,7 +46,7 @@ document.addEventListener('DOMContentLoaded', function() {
viewAnswersBtn.addEventListener('click', () => {
if (currentExamId) {
window.open(`/answers.html?id=${currentExamId}`, '_blank');
} else {
showError('No exam ID available for viewing answers.');
}

View File

@@ -4,7 +4,7 @@ Thank you for your interest in contributing! Here's how you can help:
## Quick Start
1. Fork and clone the [repository](https://github.com/nishanb/ck-x)
2. Follow our [Development Setup Guide](docs/development-setup.md)
3. Create a new branch for your changes
4. Submit a Pull Request

View File

@@ -12,6 +12,14 @@ Each lab in CK-X Simulator consists of:
4. **Setup and Verification Scripts** to prepare environments and validate student solutions
5. **Answers File** with solution documentation
## Considerations Before Creating a Lab
1. The cluster will consist of one control-plane node and multiple worker nodes.
2. SSH access to the nodes is not provided, which may restrict labs that require access to Kubernetes internals or node internals.
3. All setup scripts are executed simultaneously, so ensure that the questions are independent of each other.
4. Limit the setup to a maximum of two worker nodes to reduce system resource consumption during the exam.
5. When creating files in the cluster, use the `/tmp/exam` directory; it is created during setup and removed during cleanup.
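For example, a minimal setup script that respects these considerations (resource names are illustrative) might look like:

```bash
#!/bin/bash
# Illustrative setup sketch: idempotent, independent of other questions,
# and confined to /tmp/exam as required above.
kubectl create namespace demo-ns --dry-run=client -o yaml | kubectl apply -f -
mkdir -p /tmp/exam
echo "scratch data for this question only" > /tmp/exam/demo-notes.txt
exit 0
```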
## Step 1: Create Lab Directory Structure
First, create a directory structure for your lab using this pattern:
@@ -62,7 +70,7 @@ Parameters:
- `lab`: Unique identifier for the lab (should match directory structure)
- `workerNodes`: Number of worker nodes required for this lab
- `answers`: Path to answers markdown file
- `questions`: Assessment JSON filename
- `totalMarks`: Maximum possible score
- `lowScore`, `mediumScore`, `highScore`: Score thresholds for result categorization
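Putting these parameters together, a minimal `config.json` for a hypothetical `cka-002` lab (all values illustrative) could be written as:

```bash
# Illustrative only: paths and thresholds must match your lab's layout.
cat > config.json << 'EOF'
{
  "lab": "cka-002",
  "workerNodes": 2,
  "answers": "assets/exams/cka/002/answers.md",
  "questions": "assessment.json",
  "totalMarks": 100,
  "lowScore": 40,
  "mediumScore": 60,
  "highScore": 90
}
EOF
```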

View File

@@ -4,7 +4,7 @@
1. Clone the repository:
```bash
git clone https://github.com/nishanb/ck-x.git
cd ck-x
```
@@ -13,6 +13,10 @@ cd ck-x
./scripts/compose-deploy.sh
```
The script will deploy all services locally and open the application in your browser.
Alternatively, run `docker compose up` and manually navigate to `http://localhost:30080` in your browser.
After making any changes to the code, you can redeploy with:

View File

@@ -0,0 +1,343 @@
# CKA Assessment Answers
## Question 1: Namespace and Pod Creation
Create a namespace named `app-team1` and create a pod named `nginx-pod` with the following specifications:
- Image: nginx:1.19
- Namespace: app-team1
- Label: run=nginx-pod
```bash
# Create the namespace
kubectl create namespace app-team1

# Create the pod
kubectl run nginx-pod --image=nginx:1.19 -n app-team1 --labels=run=nginx-pod
```
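A quick way to confirm the graded details (namespace, image, and label):

```bash
kubectl get pod nginx-pod -n app-team1 --show-labels
```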
## Question 2: Static Pod Creation
Create a static pod named `static-web` on `ckad9999` with the following specifications:
- Image: nginx:1.19
- Port: 80
```bash
# Create the static pod manifest in the kubelet's manifest directory
cat << EOF > /etc/kubernetes/manifests/static-web.yaml
apiVersion: v1
kind: Pod
metadata:
  name: static-web
spec:
  containers:
  - name: nginx
    image: nginx:1.19
    ports:
    - containerPort: 80
EOF
```
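Once the kubelet picks up the manifest it creates a mirror pod whose name is suffixed with the node name; assuming the node is `ckad9999`, it can be checked with:

```bash
kubectl get pod static-web-ckad9999
```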
## Question 3: Storage Setup
Create a StorageClass named `fast-storage` and a PVC named `data-pvc` with the following specifications:
StorageClass (cluster-scoped, so no namespace applies):
- Name: fast-storage
- Provisioner: kubernetes.io/no-provisioner
PVC:
- Name: data-pvc
- StorageClass: fast-storage
- Size: 1Gi
- Namespace: storage
- Access Mode: ReadWriteOnce
```bash
# Create the storage namespace
kubectl create namespace storage

# Create the StorageClass
cat << EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-storage
provisioner: kubernetes.io/no-provisioner
EOF

# Create the PVC
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-pvc
  namespace: storage
spec:
  storageClassName: fast-storage
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
```
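Note that a `no-provisioner` StorageClass does no dynamic provisioning, so the claim is expected to stay `Pending` until a matching PersistentVolume appears:

```bash
kubectl get pvc data-pvc -n storage   # STATUS: Pending until a matching PV exists
```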
## Question 4: Logging Setup
Create a pod named `logger` in the monitoring namespace with the following specifications:
- Container 1: busybox (writes logs to /var/log/app.log)
- Container 2: fluentd (reads logs from the same location)
- Use emptyDir volume to share logs between containers
```bash
# Create the monitoring namespace
kubectl create namespace monitoring

# Create the pod. Note the escaped \$(date): the heredoc is unquoted, so an
# unescaped command substitution would run once on the host instead of in the container.
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: logger
  namespace: monitoring
spec:
  containers:
  - name: busybox
    image: busybox
    command: ['/bin/sh', '-c']
    args:
    - |
      while true; do
        echo "\$(date) - Application log entry" >> /var/log/app.log
        sleep 10
      done
    volumeMounts:
    - name: log-volume
      mountPath: /var/log
  - name: fluentd
    image: fluentd
    volumeMounts:
    - name: log-volume
      mountPath: /var/log
  volumes:
  - name: log-volume
    emptyDir: {}
EOF
```
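To confirm the `emptyDir` volume is actually shared, tail the log file from the fluentd container rather than the writer:

```bash
kubectl exec -n monitoring logger -c fluentd -- tail -n 3 /var/log/app.log
```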
## Question 5: RBAC Setup
Create a ServiceAccount named `app-sa` and configure RBAC to allow it to read pods in the default namespace.
```bash
# Create the ServiceAccount
kubectl create serviceaccount app-sa

# Create the Role
cat << EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list"]
EOF

# Create the RoleBinding (a ServiceAccount subject requires an explicit namespace)
cat << EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods
subjects:
- kind: ServiceAccount
  name: app-sa
  namespace: default
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
EOF
```
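The binding can be verified without switching kubeconfig contexts by impersonating the ServiceAccount:

```bash
kubectl auth can-i list pods --as=system:serviceaccount:default:app-sa    # expect: yes
kubectl auth can-i delete pods --as=system:serviceaccount:default:app-sa  # expect: no
```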
## Question 6: Network Policy
Create a NetworkPolicy named `db-policy` in the networking namespace to allow only frontend pods to access the database pods on port 3306.
```bash
# Create the networking namespace
kubectl create namespace networking

# Create the NetworkPolicy
cat << EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: db-policy
  namespace: networking
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 3306
EOF
```
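For a quick functional check (the lab setup creates `frontend` and `db` pods in this namespace, and this assumes the busybox build ships `nc` with `-z`): a connection from a `role=frontend` pod should succeed, while one from an unlabeled pod should time out.

```bash
DB_IP=$(kubectl get pod db -n networking -o jsonpath='{.status.podIP}')
kubectl run np-test --rm -it --restart=Never --image=busybox \
  --labels=role=frontend -n networking -- nc -zv -w 2 "$DB_IP" 3306
```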
## Question 7: Deployment and Service
Create a Deployment named `web-app` with 3 replicas and a NodePort Service named `web-service` with the following specifications:
Deployment:
- Name: web-app
- Image: nginx:1.19
- Replicas: 3
Service:
- Name: web-service
- Type: NodePort
- Port: 80
- Target Port: 80
```bash
# Create the Deployment
cat << EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-app
  template:
    metadata:
      labels:
        app: web-app
    spec:
      containers:
      - name: nginx
        image: nginx:1.19
EOF

# Create the Service
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: web-service
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: web-app
EOF
```
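Once the service is up, the NodePort that Kubernetes assigned can be read back with:

```bash
kubectl get svc web-service -o jsonpath='{.spec.ports[0].nodePort}'
```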
## Question 8: Resource Management
Create a pod named `resource-pod` in the monitoring namespace with the following resource specifications:
- CPU Request: 100m
- Memory Request: 128Mi
- CPU Limit: 200m
- Memory Limit: 256Mi
```bash
# Create the pod
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: resource-pod
  namespace: monitoring
spec:
  containers:
  - name: nginx
    image: nginx
    resources:
      requests:
        cpu: "100m"
        memory: "128Mi"
      limits:
        cpu: "200m"
        memory: "256Mi"
EOF
```
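To confirm the requests and limits landed as specified:

```bash
kubectl get pod resource-pod -n monitoring -o jsonpath='{.spec.containers[0].resources}'
```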
## Question 9: ConfigMap and Pod
Create a ConfigMap named `app-config` with a key `APP_COLOR` set to `blue` and create a pod named `config-pod` that mounts this ConfigMap at `/etc/config`.
```bash
# Create the ConfigMap
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
data:
  APP_COLOR: blue
EOF

# Create the pod
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: config-pod
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config
  volumes:
  - name: config-volume
    configMap:
      name: app-config
EOF
```
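Because the ConfigMap is mounted as a volume, each key appears as a file under the mount path:

```bash
kubectl exec config-pod -- cat /etc/config/APP_COLOR   # expect: blue
```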
## Question 10: Health Checks
Create a pod named `health-check` with the following health check specifications:
- Liveness Probe: HTTP GET / on port 80
- Readiness Probe: HTTP GET / on port 80
- Initial Delay: 5 seconds for both probes
```bash
# Create the pod
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: health-check
spec:
  containers:
  - name: nginx
    image: nginx
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
    readinessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
EOF
```
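To see how the kubelet registered the probes:

```bash
kubectl describe pod health-check | grep -E 'Liveness|Readiness'
```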

View File

@@ -0,0 +1,255 @@
{
"questions": [
{
"id": "1",
"namespace": "app-team1",
"machineHostname": "ckad9999",
"question": "Create a pod named `nginx-pod` using the `nginx:1.19` image.\n\nEnsure the pod is created in the `app-team1` namespace and has the label `run=nginx-pod`.",
"concepts": ["pods", "labels", "namespaces"],
"verification": [
{
"id": "1",
"description": "Pod exists in correct namespace",
"verificationScriptFile": "q1_s1_validate_namespace.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Pod has correct image",
"verificationScriptFile": "q1_s2_validate_pod.sh",
"expectedOutput": "0",
"weightage": 1
},
{
"id": "3",
"description": "Pod has correct status and configuration",
"verificationScriptFile": "q1_s3_validate_pod_status.sh",
"expectedOutput": "0",
"weightage": 2
}
]
},
{
"id": "2",
"namespace": "default",
"machineHostname": "ckad9999",
"question": "Create a static pod named `static-web` on `ckad9999` using the `nginx:1.19` image.\n\nPlace the static pod manifest file at `/etc/kubernetes/manifests/static-web.yaml`.",
"concepts": ["static pods", "node configuration"],
"verification": [
{
"id": "1",
"description": "Static pod manifest exists",
"verificationScriptFile": "q2_s1_validate_static_pod.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Static pod manifest has correct configuration",
"verificationScriptFile": "q2_s2_validate_static_pod_config.sh",
"expectedOutput": "0",
"weightage": 3
}
]
},
{
"id": "3",
"namespace": "storage",
"machineHostname": "ckad9999",
"question": "Create a StorageClass named `fast-storage` with the provisioner `kubernetes.io/no-provisioner`.\n\nCreate a PersistentVolumeClaim named `data-pvc` in the `storage` namespace that uses this StorageClass.\n\nSet the access mode to `ReadWriteOnce` and request `1Gi` of storage.",
"concepts": ["storage", "storage classes", "persistent volumes"],
"verification": [
{
"id": "1",
"description": "StorageClass and PVC exist",
"verificationScriptFile": "q3_s1_validate_storageclass.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "StorageClass has correct configuration",
"verificationScriptFile": "q3_s2_validate_pvc.sh",
"expectedOutput": "0",
"weightage": 2
}
]
},
{
"id": "4",
"namespace": "monitoring",
"machineHostname": "ckad9999",
"question": "Create a pod named `logger` with two containers:\n\n1. A `busybox` container that writes logs to `/var/log/app.log`\n2. A `fluentd` container that reads logs from the same location\n\nUse an `emptyDir` volume to share logs between containers.",
"concepts": ["multi-container pods", "volumes", "logging"],
"verification": [
{
"id": "1",
"description": "Pod and containers exist",
"verificationScriptFile": "q4_s1_validate_pod.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Logging functionality is working",
"verificationScriptFile": "q4_s2_validate_logging.sh",
"expectedOutput": "0",
"weightage": 3
}
]
},
{
"id": "5",
"namespace": "default",
"machineHostname": "ckad9999",
"question": "Create a ServiceAccount named `app-sa`.\n\nCreate a Role named `pod-reader` that allows listing and getting pods.\n\nCreate a RoleBinding named `read-pods` that binds the `pod-reader` Role to the `app-sa` ServiceAccount.",
"concepts": ["RBAC", "service accounts", "roles"],
"verification": [
{
"id": "1",
"description": "Service Account exist",
"verificationScriptFile": "q5_s1_validate_sa.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Role has correct permissions",
"verificationScriptFile": "q5_s2_validate_rbac.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "3",
"description": "RBAC permissions are correctly configured",
"verificationScriptFile": "q5_s3_validate_rbac_permissions.sh",
"expectedOutput": "0",
"weightage": 3
}
]
},
{
"id": "6",
"namespace": "networking",
"machineHostname": "ckad9999",
"question": "Create a NetworkPolicy named `db-policy` in the `networking` namespace that:\n\n1. Allows pods with label `role=frontend` to connect to pods with label `role=db` on port `3306`\n2. Denies all other ingress traffic to pods with label `role=db`",
"concepts": ["network policies", "pod networking"],
"verification": [
{
"id": "1",
"description": "NetworkPolicy exists",
"verificationScriptFile": "q6_s1_validate_networkpolicy.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Network policy is correctly configured and effective",
"verificationScriptFile": "q6_s2_validate_network_policy_effect.sh",
"expectedOutput": "0",
"weightage": 3
}
]
},
{
"id": "7",
"namespace": "default",
"machineHostname": "ckad9999",
"question": "Create a deployment named `web-app` with `3` replicas using the `nginx:1.19` image.\n\nCreate a NodePort service named `web-service` that exposes the deployment on port `80`.\n\nEnsure the pods are distributed across multiple nodes.",
"concepts": ["deployments", "services", "pod distribution"],
"verification": [
{
"id": "1",
"description": "Deployment and service exist",
"verificationScriptFile": "q7_s1_validate_deployment.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Service has correct configuration",
"verificationScriptFile": "q7_s2_validate_service.sh",
"expectedOutput": "0",
"weightage": 2
}
]
},
{
"id": "8",
"namespace": "monitoring",
"machineHostname": "ckad9999",
"question": "Create a pod named `resource-pod` in the `monitoring` namespace with the following resource requirements:\n\n- CPU request: `100m`\n- CPU limit: `200m`\n- Memory request: `128Mi`\n- Memory limit: `256Mi`",
"concepts": ["resource management", "pod configuration"],
"verification": [
{
"id": "1",
"description": "Pod exists with correct resource configuration",
"verificationScriptFile": "q8_s1_validate_pod.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Resource usage and limits are correct",
"verificationScriptFile": "q8_s2_validate_resource_usage.sh",
"expectedOutput": "0",
"weightage": 3
}
]
},
{
"id": "9",
"namespace": "default",
"machineHostname": "ckad9999",
"question": "Create a ConfigMap named `app-config` with the key `APP_COLOR` and value `blue`.\n\nCreate a pod named `config-pod` that mounts this ConfigMap as a volume at `/etc/config`.",
"concepts": ["configmaps", "volumes", "pod configuration"],
"verification": [
{
"id": "1",
"description": "ConfigMap and pod exist",
"verificationScriptFile": "q9_s1_validate_configmap.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "ConfigMap has correct data",
"verificationScriptFile": "q9_s2_validate_pod.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "3",
"description": "ConfigMap usage and mounting are correct",
"verificationScriptFile": "q9_s3_validate_configmap_usage.sh",
"expectedOutput": "0",
"weightage": 3
}
]
},
{
"id": "10",
"namespace": "default",
"machineHostname": "ckad9999",
"question": "Create a pod named `health-check` with the following health check configuration:\n\n- Liveness probe: HTTP GET on `/` port `80` with initial delay of `5` seconds\n- Readiness probe: HTTP GET on `/` port `80` with initial delay of `5` seconds",
"concepts": ["health checks", "pod lifecycle"],
"verification": [
{
"id": "1",
"description": "Pod exists with health check configuration",
"verificationScriptFile": "q10_s1_validate_pod.sh",
"expectedOutput": "0",
"weightage": 2
},
{
"id": "2",
"description": "Health check effectiveness is verified",
"verificationScriptFile": "q10_s2_validate_health_check_effectiveness.sh",
"expectedOutput": "0",
"weightage": 3
}
]
}
]
}

View File

@@ -0,0 +1,10 @@
{
"lab": "cka-001",
"workerNodes": 2,
"answers": "assets/exams/cka/001/answers.md",
"questions": "assessment.json",
"totalMarks": 100,
"lowScore": 40,
"mediumScore": 60,
"highScore": 90
}

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Setup for Question 10: Health Check setup
# Remove any existing pod with the same name
kubectl delete pod health-check --ignore-not-found=true
# Pre-pull the nginx image
kubectl run prefetch-nginx --image=nginx --restart=Never --dry-run=client -o yaml | kubectl apply -f -
# Create a ConfigMap with a custom nginx configuration that includes /healthz endpoint
kubectl create configmap nginx-health-config --from-literal=nginx.conf='
events {
worker_connections 1024;
}
http {
server {
listen 80;
location /healthz {
access_log off;
return 200 "healthy\n";
}
}
}' --dry-run=client -o yaml | kubectl apply -f -
sleep 5
kubectl delete pod prefetch-nginx --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,8 @@
#!/bin/bash
# Setup for Question 1: Create namespace and pod
# No specific setup needed as this is a creation task
# Just ensure the namespace doesn't exist already
kubectl delete namespace app-team1 --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Setup for Question 2: Static Pod setup
# Ensure the static pod directory exists
mkdir -p /etc/kubernetes/manifests/
# Remove any existing static pod with the same name
rm -f /etc/kubernetes/manifests/static-web.yaml
exit 0

View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Setup for Question 3: Storage setup
# Create storage namespace if it doesn't exist
kubectl create namespace storage --dry-run=client -o yaml | kubectl apply -f -
# Remove any existing storage class and PVC with the same names
kubectl delete storageclass fast-storage --ignore-not-found=true
kubectl delete pvc -n storage data-pvc --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Setup for Question 4: Logging setup
# Create monitoring namespace if it doesn't exist
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -
# Remove any existing pod with the same name
kubectl delete pod -n monitoring logger --ignore-not-found=true
# Pull required images in advance to speed up pod creation
kubectl run prefetch-busybox --image=busybox --restart=Never -n monitoring --dry-run=client -o yaml | kubectl apply -f -
kubectl run prefetch-fluentd --image=fluentd --restart=Never -n monitoring --dry-run=client -o yaml | kubectl apply -f -
# Wait for prefetch pods to be created
sleep 5
# Clean up prefetch pods
kubectl delete pod -n monitoring prefetch-busybox --ignore-not-found=true
kubectl delete pod -n monitoring prefetch-fluentd --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Setup for Question 5: RBAC setup
# Remove any existing resources with the same names
kubectl delete serviceaccount app-sa --ignore-not-found=true
kubectl delete role pod-reader --ignore-not-found=true
kubectl delete rolebinding read-pods --ignore-not-found=true
# Create a test pod to verify RBAC permissions later
kubectl run test-pod --image=nginx --restart=Never --dry-run=client -o yaml | kubectl apply -f -
exit 0

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Setup for Question 6: Network Policy setup
# Create networking namespace if it doesn't exist
kubectl create namespace networking --dry-run=client -o yaml | kubectl apply -f -
# Remove any existing network policy
kubectl delete networkpolicy -n networking db-policy --ignore-not-found=true
# Create test pods with appropriate labels
kubectl run frontend --image=nginx --labels=role=frontend -n networking --dry-run=client -o yaml | kubectl apply -f -
kubectl run db --image=mysql --labels=role=db -n networking --env=MYSQL_ROOT_PASSWORD=password --dry-run=client -o yaml | kubectl apply -f -
exit 0

View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Setup for Question 7: Deployment and Service setup
# Remove any existing deployment and service with the same names
kubectl delete deployment web-app --ignore-not-found=true
kubectl delete service web-service --ignore-not-found=true
# Pre-pull the nginx image to speed up deployment creation
kubectl run prefetch-nginx --image=nginx:1.19 --restart=Never --dry-run=client -o yaml | kubectl apply -f -
sleep 5
kubectl delete pod prefetch-nginx --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Setup for Question 8: Resource Management setup
# Ensure monitoring namespace exists
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -
# Remove any existing pod with the same name
kubectl delete pod -n monitoring resource-pod --ignore-not-found=true
# Pre-pull the nginx image
kubectl run prefetch-nginx --image=nginx --restart=Never -n monitoring --dry-run=client -o yaml | kubectl apply -f -
sleep 5
kubectl delete pod -n monitoring prefetch-nginx --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Setup for Question 9: ConfigMap setup
# Remove any existing configmap and pod with the same names
kubectl delete configmap app-config --ignore-not-found=true
kubectl delete pod config-pod --ignore-not-found=true
# Pre-pull the nginx image
kubectl run prefetch-nginx --image=nginx --restart=Never --dry-run=client -o yaml | kubectl apply -f -
sleep 5
kubectl delete pod prefetch-nginx --ignore-not-found=true
exit 0

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Validate if pod with health checks exists with correct configuration
POD_NAME="health-check"
EXPECTED_IMAGE="nginx"
# Check if pod exists
if ! kubectl get pod $POD_NAME &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found"
exit 1
fi
# Check if pod is running
POD_STATUS=$(kubectl get pod $POD_NAME -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' exists but is not running (status: $POD_STATUS)"
exit 1
fi
# Check if correct image is used
POD_IMAGE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].image}')
if [ "$POD_IMAGE" != "$EXPECTED_IMAGE" ]; then
echo "❌ Pod '$POD_NAME' using incorrect image: $POD_IMAGE (expected: $EXPECTED_IMAGE)"
exit 1
fi
# Check if liveness probe is configured
LIVENESS_PROBE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.path}')
if [ "$LIVENESS_PROBE" != "/" ]; then
echo "❌ Pod '$POD_NAME' missing liveness probe or incorrect path"
exit 1
fi
# Check if liveness probe port is correct
LIVENESS_PORT=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
if [ "$LIVENESS_PORT" != "80" ]; then
echo "❌ Pod '$POD_NAME' has incorrect liveness probe port: $LIVENESS_PORT (expected: 80)"
exit 1
fi
# Check if readiness probe is configured
READINESS_PROBE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.httpGet.path}')
if [ "$READINESS_PROBE" != "/" ]; then
echo "❌ Pod '$POD_NAME' missing readiness probe or incorrect path"
exit 1
fi
# Check if readiness probe port is correct
READINESS_PORT=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.httpGet.port}')
if [ "$READINESS_PORT" != "80" ]; then
echo "❌ Pod '$POD_NAME' has incorrect readiness probe port: $READINESS_PORT (expected: 80)"
exit 1
fi
# Check if probes have correct initial delay
LIVENESS_DELAY=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.initialDelaySeconds}')
READINESS_DELAY=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.initialDelaySeconds}')
if [ "$LIVENESS_DELAY" != "5" ]; then
echo "❌ Pod '$POD_NAME' has incorrect liveness probe initial delay: $LIVENESS_DELAY (expected: 5)"
exit 1
fi
if [ "$READINESS_DELAY" != "5" ]; then
echo "❌ Pod '$POD_NAME' has incorrect readiness probe initial delay: $READINESS_DELAY (expected: 5)"
exit 1
fi
echo "✅ Pod '$POD_NAME' exists with correct health check configuration"
exit 0

View File

@@ -0,0 +1,88 @@
#!/bin/bash
# Validate health check effectiveness
POD_NAME="health-check"
# Check if pod exists and is running
if ! kubectl get pod $POD_NAME &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found"
exit 1
fi
POD_STATUS=$(kubectl get pod $POD_NAME -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' is not running"
exit 1
fi
# Check liveness probe configuration
LIVENESS_PROBE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.path}')
LIVENESS_PORT=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
LIVENESS_DELAY=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.initialDelaySeconds}')
LIVENESS_PERIOD=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].livenessProbe.periodSeconds}')
if [ "$LIVENESS_PROBE" != "/" ]; then
echo "❌ Incorrect liveness probe path: $LIVENESS_PROBE"
exit 1
fi
if [ "$LIVENESS_PORT" != "80" ]; then
echo "❌ Incorrect liveness probe port: $LIVENESS_PORT"
exit 1
fi
if [ "$LIVENESS_DELAY" != "5" ]; then
echo "❌ Incorrect liveness probe initial delay: $LIVENESS_DELAY"
exit 1
fi
# Check readiness probe configuration
READINESS_PROBE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.httpGet.path}')
READINESS_PORT=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.httpGet.port}')
READINESS_DELAY=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.initialDelaySeconds}')
READINESS_PERIOD=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].readinessProbe.periodSeconds}')
if [ "$READINESS_PROBE" != "/" ]; then
echo "❌ Incorrect readiness probe path: $READINESS_PROBE"
exit 1
fi
if [ "$READINESS_PORT" != "80" ]; then
echo "❌ Incorrect readiness probe port: $READINESS_PORT"
exit 1
fi
if [ "$READINESS_DELAY" != "5" ]; then
echo "❌ Incorrect readiness probe initial delay: $READINESS_DELAY"
exit 1
fi
# Check if health endpoint is responding
POD_IP=$(kubectl get pod $POD_NAME -o jsonpath='{.status.podIP}')
if [ -z "$POD_IP" ]; then
echo "❌ Pod IP not found"
exit 1
fi
# Test health endpoint (root path for nginx)
if ! kubectl exec $POD_NAME -- curl -s http://localhost/ | grep -q "Welcome to nginx"; then
echo "❌ Root endpoint is not responding correctly"
exit 1
fi
# Check if pod has been restarted due to health check failures
RESTARTS=$(kubectl get pod $POD_NAME -o jsonpath='{.status.containerStatuses[0].restartCount}')
if [ "$RESTARTS" -gt 0 ]; then
echo "❌ Pod has been restarted $RESTARTS times"
exit 1
fi
# Check if pod is ready
READY=$(kubectl get pod $POD_NAME -o jsonpath='{.status.containerStatuses[0].ready}')
if [ "$READY" != "true" ]; then
echo "❌ Pod is not ready"
exit 1
fi
echo "✅ Health checks are correctly configured and working"
exit 0

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Validate if namespace app-team1 exists
NAMESPACE="app-team1"
if kubectl get namespace $NAMESPACE &> /dev/null; then
echo "✅ Namespace '$NAMESPACE' exists"
exit 0
else
echo "❌ Namespace '$NAMESPACE' not found"
exit 1
fi

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Validate if nginx pod exists and is running in app-team1 namespace
NAMESPACE="app-team1"
POD_NAME="nginx-pod"
EXPECTED_IMAGE="nginx:1.19"
# Check if pod exists
if ! kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if pod is running
POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' exists but is not running (status: $POD_STATUS)"
exit 1
fi
# Check if correct image is used
POD_IMAGE=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].image}')
if [ "$POD_IMAGE" != "$EXPECTED_IMAGE" ]; then
echo "❌ Pod '$POD_NAME' is using incorrect image: $POD_IMAGE (expected: $EXPECTED_IMAGE)"
exit 1
fi
echo "✅ Pod '$POD_NAME' exists, is running, and using correct image in namespace '$NAMESPACE'"
exit 0

View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Validate pod status and readiness
NAMESPACE="app-team1"
POD_NAME="nginx-pod"
# Check if pod is ready
READY=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.containerStatuses[0].ready}')
if [ "$READY" != "true" ]; then
echo "❌ Pod '$POD_NAME' is not ready"
exit 1
fi
# Check if pod has restarted
RESTARTS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.containerStatuses[0].restartCount}')
if [ "$RESTARTS" != "0" ]; then
echo "❌ Pod '$POD_NAME' has restarted $RESTARTS times"
exit 1
fi
# Check if pod is running on a valid node
NODE=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.nodeName}')
if [ -z "$NODE" ]; then
echo "❌ Pod '$POD_NAME' is not assigned to any node"
exit 1
fi
# Check if pod has the correct labels
LABELS=$(kubectl get pod $POD_NAME -n $NAMESPACE --show-labels)
if [[ ! "$LABELS" =~ "run=nginx-pod" ]]; then
echo "❌ Pod '$POD_NAME' missing expected labels"
exit 1
fi
echo "✅ Pod '$POD_NAME' has correct status and configuration"
exit 0

View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Validate if static pod manifest exists in the correct location
POD_NAME="static-web"
EXPECTED_IMAGE="nginx:1.19"
# Check if the static pod manifest file exists
if [ ! -f "/etc/kubernetes/manifests/static-web.yaml" ]; then
echo "❌ Static pod manifest file not found at /etc/kubernetes/manifests/static-web.yaml"
exit 1
fi
# Verify the manifest is a valid yaml file
if ! grep -q "apiVersion: v1" "/etc/kubernetes/manifests/static-web.yaml"; then
echo "❌ Static pod manifest is not a valid Kubernetes yaml file"
exit 1
fi
# Verify it's a Pod resource
if ! grep -q "kind: Pod" "/etc/kubernetes/manifests/static-web.yaml"; then
echo "❌ Static pod manifest is not configured as a Pod resource"
exit 1
fi
# Verify the pod name
if ! grep -q "name: ${POD_NAME}" "/etc/kubernetes/manifests/static-web.yaml"; then
echo "❌ Static pod manifest does not have the correct pod name: ${POD_NAME}"
exit 1
fi
echo "✅ Static pod manifest exists at the correct location with proper configuration"
exit 0

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Validate static pod manifest configuration
POD_NAME="static-web"
EXPECTED_IMAGE="nginx:1.19"
# Check if static pod manifest exists
if [ ! -f "/etc/kubernetes/manifests/static-web.yaml" ]; then
echo "❌ Static pod manifest not found at /etc/kubernetes/manifests/static-web.yaml"
exit 1
fi
# Check if manifest has correct content structure
if ! grep -q "apiVersion: v1" /etc/kubernetes/manifests/static-web.yaml; then
echo "❌ Static pod manifest missing apiVersion"
exit 1
fi
if ! grep -q "kind: Pod" /etc/kubernetes/manifests/static-web.yaml; then
echo "❌ Static pod manifest missing required content: not a Pod resource"
exit 1
fi
# Check if manifest has correct name
if ! grep -q "name: ${POD_NAME}" /etc/kubernetes/manifests/static-web.yaml; then
echo "❌ Static pod manifest has incorrect name"
exit 1
fi
# Check if manifest has correct image
if ! grep -q "image: ${EXPECTED_IMAGE}" /etc/kubernetes/manifests/static-web.yaml; then
echo "❌ Static pod manifest has incorrect image. Expected: ${EXPECTED_IMAGE}"
exit 1
fi
# Check if port 80 is defined
if ! grep -q "containerPort: 80" /etc/kubernetes/manifests/static-web.yaml; then
echo "❌ Static pod manifest missing containerPort: 80"
exit 1
fi
echo "✅ Static pod manifest has correct configuration"
exit 0

View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Validate if StorageClass exists with correct configuration
SC_NAME="fast-storage"
EXPECTED_PROVISIONER="kubernetes.io/no-provisioner"
# Check if StorageClass exists
if ! kubectl get storageclass $SC_NAME &> /dev/null; then
echo "❌ StorageClass '$SC_NAME' not found"
exit 1
fi
# Check if correct provisioner is used
PROVISIONER=$(kubectl get storageclass $SC_NAME -o jsonpath='{.provisioner}')
if [ "$PROVISIONER" != "$EXPECTED_PROVISIONER" ]; then
echo "❌ StorageClass '$SC_NAME' using incorrect provisioner: $PROVISIONER (expected: $EXPECTED_PROVISIONER)"
exit 1
fi
echo "✅ StorageClass '$SC_NAME' exists with correct provisioner"
exit 0

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Validate if PVC exists with correct configuration
NAMESPACE="storage"
PVC_NAME="data-pvc"
SC_NAME="fast-storage"
EXPECTED_SIZE="1Gi"
# Check if PVC exists
if ! kubectl get pvc $PVC_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ PVC '$PVC_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if correct storage class is used
STORAGE_CLASS=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.spec.storageClassName}')
if [ "$STORAGE_CLASS" != "$SC_NAME" ]; then
echo "❌ PVC '$PVC_NAME' using incorrect storage class: $STORAGE_CLASS (expected: $SC_NAME)"
exit 1
fi
# Check if correct size is requested
SIZE=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.spec.resources.requests.storage}')
if [ "$SIZE" != "$EXPECTED_SIZE" ]; then
echo "❌ PVC '$PVC_NAME' requesting incorrect size: $SIZE (expected: $EXPECTED_SIZE)"
exit 1
fi
echo "✅ PVC '$PVC_NAME' exists with correct storage class and size"
exit 0

View File

@@ -0,0 +1,44 @@
#!/bin/bash
# Validate storage access and configuration
NAMESPACE="storage"
PVC_NAME="data-pvc"
SC_NAME="fast-storage"
# # Check if PVC is bound
# PVC_STATUS=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.status.phase}')
# if [ "$PVC_STATUS" != "Bound" ]; then
# echo "❌ PVC '$PVC_NAME' is not bound (status: $PVC_STATUS)"
# exit 1
# fi
# Check if StorageClass is default
SC_DEFAULT=$(kubectl get storageclass $SC_NAME -o jsonpath='{.metadata.annotations.storageclass\.kubernetes\.io/is-default-class}')
if [ "$SC_DEFAULT" != "true" ]; then
echo "❌ StorageClass '$SC_NAME' is not set as default"
exit 1
fi
# Check if StorageClass has correct reclaim policy
RECLAIM_POLICY=$(kubectl get storageclass $SC_NAME -o jsonpath='{.reclaimPolicy}')
if [ "$RECLAIM_POLICY" != "Delete" ]; then
echo "❌ StorageClass '$SC_NAME' has incorrect reclaim policy: $RECLAIM_POLICY"
exit 1
fi
# Check if PVC has correct access mode
ACCESS_MODE=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.spec.accessModes[0]}')
if [ "$ACCESS_MODE" != "ReadWriteOnce" ]; then
echo "❌ PVC '$PVC_NAME' has incorrect access mode: $ACCESS_MODE"
exit 1
fi
# Check if PVC has correct storage class
PVC_SC=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.spec.storageClassName}')
if [ "$PVC_SC" != "$SC_NAME" ]; then
echo "❌ PVC '$PVC_NAME' has incorrect storage class: $PVC_SC"
exit 1
fi
echo "✅ Storage configuration is correct"
exit 0

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Validate if logging pod exists with correct configuration
NAMESPACE="monitoring"
POD_NAME="logger"
EXPECTED_CONTAINERS=2
# Check if pod exists
if ! kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if pod is running
POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' exists but is not running (status: $POD_STATUS)"
exit 1
fi
# Check if pod has two containers
CONTAINER_COUNT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[*].name}' | wc -w)
if [ "$CONTAINER_COUNT" -ne "$EXPECTED_CONTAINERS" ]; then
echo "❌ Pod '$POD_NAME' has incorrect number of containers: $CONTAINER_COUNT (expected: $EXPECTED_CONTAINERS)"
exit 1
fi
# Check if containers are using correct images
BUSYBOX_CONTAINER=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[?(@.image=="busybox")].name}')
FLUENTD_CONTAINER=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[?(@.image=="fluentd")].name}')
if [ -z "$BUSYBOX_CONTAINER" ] || [ -z "$FLUENTD_CONTAINER" ]; then
echo "❌ Pod '$POD_NAME' is missing required containers with correct images"
exit 1
fi
# Check if volume mount is configured
LOG_MOUNT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].volumeMounts[?(@.mountPath=="/var/log")].mountPath}')
if [ -z "$LOG_MOUNT" ]; then
echo "❌ Volume mount '/var/log' not configured in pod '$POD_NAME'"
exit 1
fi
# Check if emptyDir volume is used
VOLUME_TYPE=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.volumes[?(@.name=="log-volume")].emptyDir}')
if [ -z "$VOLUME_TYPE" ]; then
echo "❌ Pod '$POD_NAME' is not using emptyDir volume type"
exit 1
fi
echo "✅ Pod '$POD_NAME' exists with correct configuration and containers"
exit 0

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Validate logging functionality
NAMESPACE="monitoring"
POD_NAME="logger"
# Check if log file exists and is being written to
kubectl exec -n $NAMESPACE $POD_NAME -c busybox -- ls -l /var/log/app.log &> /dev/null
if [ $? -ne 0 ]; then
echo "❌ Log file /var/log/app.log does not exist"
exit 1
fi
# Check if log file has recent entries
LOG_ENTRIES=$(kubectl exec -n $NAMESPACE $POD_NAME -c busybox -- wc -l /var/log/app.log | awk '{print $1}')
if [ "$LOG_ENTRIES" -lt 1 ]; then
echo "❌ Log file has no entries"
exit 1
fi
# Check if fluentd container is running
FLUENTD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.containerStatuses[?(@.name=="fluentd")].state.running}')
if [ -z "$FLUENTD_STATUS" ]; then
echo "❌ Fluentd container is not running"
exit 1
fi
# Check if fluentd has correct configuration
FLUENTD_CONFIG=$(kubectl exec -n $NAMESPACE $POD_NAME -c fluentd -- cat /fluentd/etc/fluent.conf)
if [[ ! "$FLUENTD_CONFIG" =~ "source" ]] || [[ ! "$FLUENTD_CONFIG" =~ "match" ]]; then
echo "❌ Fluentd configuration is incomplete"
exit 1
fi
# Check if log volume is properly mounted
VOLUME_MOUNT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.volumes[?(@.name=="log-volume")].emptyDir}')
if [ -z "$VOLUME_MOUNT" ]; then
echo "❌ Log volume is not properly configured"
exit 1
fi
echo "✅ Logging configuration is correct and functional"
exit 0

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Validate if ServiceAccount exists
SA_NAME="app-sa"
NAMESPACE="default"
# Check if ServiceAccount exists
if ! kubectl get serviceaccount $SA_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ ServiceAccount '$SA_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
echo "✅ ServiceAccount '$SA_NAME' exists in namespace '$NAMESPACE'"
exit 0

View File

@@ -0,0 +1,50 @@
#!/bin/bash
# Validate if Role and RoleBinding exist with correct permissions
NAMESPACE="default"
ROLE_NAME="pod-reader"
ROLEBINDING_NAME="read-pods"
SA_NAME="app-sa"
# Check if Role exists
if ! kubectl get role $ROLE_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ Role '$ROLE_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if Role has correct permissions
VERBS=$(kubectl get role $ROLE_NAME -n $NAMESPACE -o jsonpath='{.rules[0].verbs[*]}')
RESOURCES=$(kubectl get role $ROLE_NAME -n $NAMESPACE -o jsonpath='{.rules[0].resources[*]}')
if [[ ! "$VERBS" =~ "get" ]] || [[ ! "$VERBS" =~ "list" ]]; then
echo "❌ Role '$ROLE_NAME' missing required permissions (get and/or list)"
exit 1
fi
if [[ ! "$RESOURCES" =~ "pods" ]]; then
echo "❌ Role '$ROLE_NAME' not configured for pods resource"
exit 1
fi
# Check if RoleBinding exists
if ! kubectl get rolebinding $ROLEBINDING_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ RoleBinding '$ROLEBINDING_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if RoleBinding links correct Role and ServiceAccount
BOUND_ROLE=$(kubectl get rolebinding $ROLEBINDING_NAME -n $NAMESPACE -o jsonpath='{.roleRef.name}')
BOUND_SA=$(kubectl get rolebinding $ROLEBINDING_NAME -n $NAMESPACE -o jsonpath='{.subjects[?(@.kind=="ServiceAccount")].name}')
if [ "$BOUND_ROLE" != "$ROLE_NAME" ]; then
echo "❌ RoleBinding '$ROLEBINDING_NAME' not bound to correct role (found: $BOUND_ROLE, expected: $ROLE_NAME)"
exit 1
fi
if [ "$BOUND_SA" != "$SA_NAME" ]; then
echo "❌ RoleBinding '$ROLEBINDING_NAME' not bound to correct ServiceAccount (found: $BOUND_SA, expected: $SA_NAME)"
exit 1
fi
echo "✅ Role and RoleBinding exist with correct configuration"
exit 0

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Validate RBAC permissions
NAMESPACE="default"
SA_NAME="app-sa"
ROLE_NAME="pod-reader"
ROLEBINDING_NAME="read-pods"
# Check if Role has correct API groups
# The pod-reader Role must target the core API group, which jsonpath reports as an empty string
API_GROUPS=$(kubectl get role $ROLE_NAME -o jsonpath='{.rules[0].apiGroups[*]}')
if [ -n "$API_GROUPS" ]; then
echo "❌ Role '$ROLE_NAME' has incorrect API groups: $API_GROUPS (expected the core group \"\")"
exit 1
fi
# Check if Role has correct resource names
RESOURCE_NAMES=$(kubectl get role $ROLE_NAME -o jsonpath='{.rules[0].resourceNames[*]}')
if [ -n "$RESOURCE_NAMES" ]; then
echo "❌ Role '$ROLE_NAME' has unexpected resource names restriction"
exit 1
fi
# Check if RoleBinding has correct namespace
RB_NAMESPACE=$(kubectl get rolebinding $ROLEBINDING_NAME -o jsonpath='{.metadata.namespace}')
if [ "$RB_NAMESPACE" != "$NAMESPACE" ]; then
echo "❌ RoleBinding '$ROLEBINDING_NAME' is in wrong namespace"
exit 1
fi
# Test pod listing permission
if ! kubectl auth can-i list pods --as=system:serviceaccount:$NAMESPACE:$SA_NAME; then
echo "❌ ServiceAccount does not have permission to list pods"
exit 1
fi
# Test pod getting permission
if ! kubectl auth can-i get pods --as=system:serviceaccount:$NAMESPACE:$SA_NAME; then
echo "❌ ServiceAccount does not have permission to get pods"
exit 1
fi
echo "✅ RBAC permissions are correctly configured"
exit 0

View File

@@ -0,0 +1,37 @@
#!/bin/bash
# Validate if NetworkPolicy exists with correct configuration
NAMESPACE="networking"
POLICY_NAME="db-policy"
DB_LABEL="role=db"
FRONTEND_LABEL="role=frontend"
# Check if NetworkPolicy exists
if ! kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ NetworkPolicy '$POLICY_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if policy targets pods with role=db label
TARGET_LABEL=$(kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE -o jsonpath='{.spec.podSelector.matchLabels.role}')
if [ "$TARGET_LABEL" != "db" ]; then
echo "❌ NetworkPolicy '$POLICY_NAME' not targeting pods with label 'role=db'"
exit 1
fi
# Check if policy allows ingress from frontend pods
INGRESS_RULES=$(kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE -o jsonpath='{.spec.ingress[*].from[*].podSelector.matchLabels.role}')
if [[ ! "$INGRESS_RULES" =~ "frontend" ]]; then
echo "❌ NetworkPolicy '$POLICY_NAME' not allowing ingress from pods with label 'role=frontend'"
exit 1
fi
# Check if policy allows port 3306
ALLOWED_PORTS=$(kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE -o jsonpath='{.spec.ingress[*].ports[*].port}')
if [[ ! "$ALLOWED_PORTS" =~ "3306" ]]; then
echo "❌ NetworkPolicy '$POLICY_NAME' not allowing port 3306"
exit 1
fi
echo "✅ NetworkPolicy '$POLICY_NAME' exists with correct configuration"
exit 0

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Validate network policy effectiveness
NAMESPACE="networking"
FRONTEND_POD="frontend"
DB_POD="db"
# Check if frontend pod exists
if ! kubectl get pod $FRONTEND_POD -n $NAMESPACE &> /dev/null; then
echo "❌ Frontend pod not found"
exit 1
fi
# Check if db pod exists
if ! kubectl get pod $DB_POD -n $NAMESPACE &> /dev/null; then
echo "❌ Database pod not found"
exit 1
fi
# Check if network policy is applied to correct pods
POLICY_PODS=$(kubectl get networkpolicy db-policy -n $NAMESPACE -o jsonpath='{.spec.podSelector.matchLabels.role}')
if [ "$POLICY_PODS" != "db" ]; then
echo "❌ Network policy is not applied to correct pods"
exit 1
fi
# Check if network policy has correct policy types
POLICY_TYPES=$(kubectl get networkpolicy db-policy -n $NAMESPACE -o jsonpath='{.spec.policyTypes[*]}')
if [[ ! "$POLICY_TYPES" =~ "Ingress" ]]; then
echo "❌ Network policy missing Ingress policy type"
exit 1
fi
# Check if network policy has correct port configuration
POLICY_PORT=$(kubectl get networkpolicy db-policy -n $NAMESPACE -o jsonpath='{.spec.ingress[0].ports[0].port}')
if [ "$POLICY_PORT" != "3306" ]; then
echo "❌ Network policy has incorrect port configuration"
exit 1
fi
echo "✅ Network policy is correctly configured and effective"
exit 0

View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Validate if Deployment exists with correct configuration
DEPLOYMENT_NAME="web-app"
EXPECTED_REPLICAS=3
EXPECTED_IMAGE="nginx:1.19"
# Check if Deployment exists
if ! kubectl get deployment $DEPLOYMENT_NAME &> /dev/null; then
echo "❌ Deployment '$DEPLOYMENT_NAME' not found"
exit 1
fi
# Check if correct number of replicas
REPLICAS=$(kubectl get deployment $DEPLOYMENT_NAME -o jsonpath='{.spec.replicas}')
if [ "$REPLICAS" != "$EXPECTED_REPLICAS" ]; then
echo "❌ Deployment '$DEPLOYMENT_NAME' has incorrect number of replicas: $REPLICAS (expected: $EXPECTED_REPLICAS)"
exit 1
fi
# Check if correct image is used
POD_IMAGE=$(kubectl get deployment $DEPLOYMENT_NAME -o jsonpath='{.spec.template.spec.containers[0].image}')
if [ "$POD_IMAGE" != "$EXPECTED_IMAGE" ]; then
echo "❌ Deployment '$DEPLOYMENT_NAME' using incorrect image: $POD_IMAGE (expected: $EXPECTED_IMAGE)"
exit 1
fi
# Check if deployment is available
AVAILABLE=$(kubectl get deployment $DEPLOYMENT_NAME -o jsonpath='{.status.availableReplicas}')
if [ "$AVAILABLE" != "$EXPECTED_REPLICAS" ]; then
echo "❌ Deployment '$DEPLOYMENT_NAME' is not fully available (available: $AVAILABLE, expected: $EXPECTED_REPLICAS)"
exit 1
fi
echo "✅ Deployment '$DEPLOYMENT_NAME' exists with correct configuration"
exit 0

View File

@@ -0,0 +1,44 @@
#!/bin/bash
# Validate if Service exists with correct configuration
SERVICE_NAME="web-service"
EXPECTED_TYPE="NodePort"
EXPECTED_PORT=80
EXPECTED_TARGET_PORT=80
# Check if Service exists
if ! kubectl get service $SERVICE_NAME &> /dev/null; then
echo "❌ Service '$SERVICE_NAME' not found"
exit 1
fi
# Check if service type is NodePort
SERVICE_TYPE=$(kubectl get service $SERVICE_NAME -o jsonpath='{.spec.type}')
if [ "$SERVICE_TYPE" != "$EXPECTED_TYPE" ]; then
echo "❌ Service '$SERVICE_NAME' has incorrect type: $SERVICE_TYPE (expected: $EXPECTED_TYPE)"
exit 1
fi
# Check if port is configured correctly
SERVICE_PORT=$(kubectl get service $SERVICE_NAME -o jsonpath='{.spec.ports[0].port}')
if [ "$SERVICE_PORT" != "$EXPECTED_PORT" ]; then
echo "❌ Service '$SERVICE_NAME' has incorrect port: $SERVICE_PORT (expected: $EXPECTED_PORT)"
exit 1
fi
# Check if target port is configured correctly
TARGET_PORT=$(kubectl get service $SERVICE_NAME -o jsonpath='{.spec.ports[0].targetPort}')
if [ "$TARGET_PORT" != "$EXPECTED_TARGET_PORT" ]; then
echo "❌ Service '$SERVICE_NAME' has incorrect target port: $TARGET_PORT (expected: $EXPECTED_TARGET_PORT)"
exit 1
fi
# Check if service has endpoints
ENDPOINTS=$(kubectl get endpoints $SERVICE_NAME -o jsonpath='{.subsets[*].addresses[*].ip}' | wc -w)
if [ "$ENDPOINTS" -eq 0 ]; then
echo "❌ Service '$SERVICE_NAME' has no endpoints"
exit 1
fi
echo "✅ Service '$SERVICE_NAME' exists with correct configuration"
exit 0

View File

@@ -0,0 +1,61 @@
#!/bin/bash
# Validate if resource-constrained pod exists with correct configuration
NAMESPACE="monitoring"
POD_NAME="resource-pod"
EXPECTED_IMAGE="nginx"
EXPECTED_CPU_REQUEST="100m"
EXPECTED_MEMORY_REQUEST="128Mi"
EXPECTED_CPU_LIMIT="200m"
EXPECTED_MEMORY_LIMIT="256Mi"
# Check if pod exists
if ! kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Check if pod is running
POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' exists but is not running (status: $POD_STATUS)"
exit 1
fi
# Check if correct image is used
POD_IMAGE=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].image}')
if [ "$POD_IMAGE" != "$EXPECTED_IMAGE" ]; then
echo "❌ Pod '$POD_NAME' using incorrect image: $POD_IMAGE (expected: $EXPECTED_IMAGE)"
exit 1
fi
# Check resource requests
CPU_REQUEST=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.requests.cpu}')
MEMORY_REQUEST=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.requests.memory}')
if [ "$CPU_REQUEST" != "$EXPECTED_CPU_REQUEST" ]; then
echo "❌ Pod '$POD_NAME' has incorrect CPU request: $CPU_REQUEST (expected: $EXPECTED_CPU_REQUEST)"
exit 1
fi
if [ "$MEMORY_REQUEST" != "$EXPECTED_MEMORY_REQUEST" ]; then
echo "❌ Pod '$POD_NAME' has incorrect memory request: $MEMORY_REQUEST (expected: $EXPECTED_MEMORY_REQUEST)"
exit 1
fi
# Check resource limits
CPU_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.limits.cpu}')
MEMORY_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.limits.memory}')
if [ "$CPU_LIMIT" != "$EXPECTED_CPU_LIMIT" ]; then
echo "❌ Pod '$POD_NAME' has incorrect CPU limit: $CPU_LIMIT (expected: $EXPECTED_CPU_LIMIT)"
exit 1
fi
if [ "$MEMORY_LIMIT" != "$EXPECTED_MEMORY_LIMIT" ]; then
echo "❌ Pod '$POD_NAME' has incorrect memory limit: $MEMORY_LIMIT (expected: $EXPECTED_MEMORY_LIMIT)"
exit 1
fi
echo "✅ Pod '$POD_NAME' exists with correct resource configuration"
exit 0

View File

@@ -0,0 +1,73 @@
#!/bin/bash
# Validate resource usage and limits
NAMESPACE="monitoring"
POD_NAME="resource-pod"
# Check if pod exists and is running
if ! kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found"
exit 1
fi
POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' is not running"
exit 1
fi
# Check resource requests
CPU_REQUEST=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.requests.cpu}')
MEMORY_REQUEST=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.requests.memory}')
if [ "$CPU_REQUEST" != "100m" ]; then
echo "❌ Incorrect CPU request: $CPU_REQUEST (expected: 100m)"
exit 1
fi
if [ "$MEMORY_REQUEST" != "128Mi" ]; then
echo "❌ Incorrect memory request: $MEMORY_REQUEST (expected: 128Mi)"
exit 1
fi
# Check resource limits
CPU_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.limits.cpu}')
MEMORY_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].resources.limits.memory}')
if [ "$CPU_LIMIT" != "200m" ]; then
echo "❌ Incorrect CPU limit: $CPU_LIMIT (expected: 200m)"
exit 1
fi
if [ "$MEMORY_LIMIT" != "256Mi" ]; then
echo "❌ Incorrect memory limit: $MEMORY_LIMIT (expected: 256Mi)"
exit 1
fi
# Check actual resource usage
CPU_USAGE=$(kubectl top pod $POD_NAME -n $NAMESPACE --no-headers | awk '{print $2}')
MEMORY_USAGE=$(kubectl top pod $POD_NAME -n $NAMESPACE --no-headers | awk '{print $3}')
# Convert CPU usage to millicores for comparison
CPU_MILLICORES=$(echo $CPU_USAGE | sed 's/m//')
if [ "$CPU_MILLICORES" -gt 200 ]; then
echo "❌ CPU usage exceeds limit: $CPU_USAGE"
exit 1
fi
# Convert memory usage to Mi for comparison
MEMORY_MI=$(echo $MEMORY_USAGE | sed 's/Mi//')
if [ "$MEMORY_MI" -gt 256 ]; then
echo "❌ Memory usage exceeds limit: $MEMORY_USAGE"
exit 1
fi
# Check if pod has been restarted due to resource issues
RESTARTS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.containerStatuses[0].restartCount}')
if [ "$RESTARTS" -gt 0 ]; then
echo "❌ Pod has been restarted $RESTARTS times"
exit 1
fi
echo "✅ Resource configuration and usage are correct"
exit 0

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Validate if ConfigMap exists with correct configuration
CONFIGMAP_NAME="app-config"
EXPECTED_KEY="APP_COLOR"
EXPECTED_VALUE="blue"
# Check if ConfigMap exists
if ! kubectl get configmap $CONFIGMAP_NAME &> /dev/null; then
echo "❌ ConfigMap '$CONFIGMAP_NAME' not found"
exit 1
fi
# Check if ConfigMap has the required key
if ! kubectl get configmap $CONFIGMAP_NAME -o jsonpath='{.data.APP_COLOR}' &> /dev/null; then
echo "❌ ConfigMap '$CONFIGMAP_NAME' missing required key '$EXPECTED_KEY'"
exit 1
fi
# Check if ConfigMap has the correct value
CONFIG_VALUE=$(kubectl get configmap $CONFIGMAP_NAME -o jsonpath='{.data.APP_COLOR}')
if [ "$CONFIG_VALUE" != "$EXPECTED_VALUE" ]; then
echo "❌ ConfigMap '$CONFIGMAP_NAME' has incorrect value for '$EXPECTED_KEY': $CONFIG_VALUE (expected: $EXPECTED_VALUE)"
exit 1
fi
echo "✅ ConfigMap '$CONFIGMAP_NAME' exists with correct configuration"
exit 0

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Validate if pod using ConfigMap exists with correct configuration
POD_NAME="config-pod"
CONFIGMAP_NAME="app-config"
EXPECTED_IMAGE="nginx"
# Check if pod exists
if ! kubectl get pod $POD_NAME &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found"
exit 1
fi
# Check if pod is running
POD_STATUS=$(kubectl get pod $POD_NAME -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' exists but is not running (status: $POD_STATUS)"
exit 1
fi
# Check if correct image is used
POD_IMAGE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].image}')
if [ "$POD_IMAGE" != "$EXPECTED_IMAGE" ]; then
echo "❌ Pod '$POD_NAME' using incorrect image: $POD_IMAGE (expected: $EXPECTED_IMAGE)"
exit 1
fi
# Check if ConfigMap is mounted
CONFIGMAP_MOUNT=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.volumes[?(@.configMap.name=="app-config")].name}')
if [ -z "$CONFIGMAP_MOUNT" ]; then
echo "❌ Pod '$POD_NAME' not mounting ConfigMap '$CONFIGMAP_NAME'"
exit 1
fi
# Check if ConfigMap is mounted at correct path
MOUNT_PATH=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].volumeMounts[?(@.name=="config-volume")].mountPath}')
if [ "$MOUNT_PATH" != "/etc/config" ]; then
echo "❌ Pod '$POD_NAME' mounting ConfigMap at incorrect path: $MOUNT_PATH (expected: /etc/config)"
exit 1
fi
echo "✅ Pod '$POD_NAME' exists with correct ConfigMap configuration"
exit 0

View File

@@ -0,0 +1,60 @@
#!/bin/bash
# Validate ConfigMap usage and mounting
POD_NAME="config-pod"
CONFIGMAP_NAME="app-config"
# Check if pod exists and is running
if ! kubectl get pod $POD_NAME &> /dev/null; then
echo "❌ Pod '$POD_NAME' not found"
exit 1
fi
POD_STATUS=$(kubectl get pod $POD_NAME -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" != "Running" ]; then
echo "❌ Pod '$POD_NAME' is not running"
exit 1
fi
# Check if ConfigMap exists
if ! kubectl get configmap $CONFIGMAP_NAME &> /dev/null; then
echo "❌ ConfigMap '$CONFIGMAP_NAME' not found"
exit 1
fi
# Check if ConfigMap has correct data
CONFIG_VALUE=$(kubectl get configmap $CONFIGMAP_NAME -o jsonpath='{.data.APP_COLOR}')
if [ "$CONFIG_VALUE" != "blue" ]; then
echo "❌ ConfigMap has incorrect value: $CONFIG_VALUE (expected: blue)"
exit 1
fi
# Check if volume is properly mounted
VOLUME_MOUNT=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.volumes[?(@.name=="config-volume")].configMap.name}')
if [ "$VOLUME_MOUNT" != "$CONFIGMAP_NAME" ]; then
echo "❌ ConfigMap is not properly mounted as volume"
exit 1
fi
# Check if volume is mounted at correct path
MOUNT_PATH=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].volumeMounts[?(@.name=="config-volume")].mountPath}')
if [ "$MOUNT_PATH" != "/etc/config" ]; then
echo "❌ ConfigMap is mounted at incorrect path: $MOUNT_PATH"
exit 1
fi
# Verify ConfigMap content in pod
POD_CONFIG_VALUE=$(kubectl exec $POD_NAME -- cat /etc/config/APP_COLOR)
if [ "$POD_CONFIG_VALUE" != "blue" ]; then
echo "❌ ConfigMap value not correctly mounted in pod"
exit 1
fi
# Check if pod can read the mounted ConfigMap
if ! kubectl exec $POD_NAME -- ls -l /etc/config/APP_COLOR &> /dev/null; then
echo "❌ Pod cannot access mounted ConfigMap file"
exit 1
fi
echo "✅ ConfigMap is correctly configured and mounted"
exit 0

View File

@@ -90,7 +90,7 @@ kubectl logs <pod-name> -n troubleshooting
Potential fixes:
1. If the image is incorrect:
```bash
kubectl set image deployments -n troubleshooting broken-app app=nginx:latest
```
2. If environment variables are missing:
```bash
@@ -101,89 +101,109 @@ Potential fixes:
kubectl patch deployment broken-app -n troubleshooting -p '{"spec":{"template":{"spec":{"containers":[{"name":"container-name","resources":{"limits":{"memory":"512Mi"}}}]}}}}'
```
## Question 6: Create a multi-container pod with sidecar logging pattern
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sidecar-pod
  namespace: troubleshooting
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: log-volume
      mountPath: /var/log
  - name: sidecar
    image: busybox
    command: ["sh", "-c", "while true; do date >> /var/log/date.log; sleep 10; done"]
    volumeMounts:
    - name: log-volume
      mountPath: /var/log
  volumes:
  - name: log-volume
    emptyDir: {}
```
Save this as `sidecar-pod.yaml` and apply:
```bash
kubectl apply -f sidecar-pod.yaml
```
You can verify the pod is working correctly:
```bash
# Check that both containers are running in the pod
kubectl get pod sidecar-pod -n troubleshooting
# Verify the shared volume is mounted and the log file is being written
kubectl exec -it sidecar-pod -n troubleshooting -c nginx -- cat /var/log/date.log
# Check events related to the pod
kubectl describe pod sidecar-pod -n troubleshooting
```
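To follow the sidecar's output as it is written (an optional check; it assumes the pod and namespace above, and that `tail` is available in the busybox image, which it normally is):
```bash
# Stream the shared log file from the sidecar container
kubectl exec -it sidecar-pod -n troubleshooting -c sidecar -- tail -f /var/log/date.log
```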
## Question 7: Service 'web-service' in namespace 'troubleshooting' is not routing traffic to pods properly. Identify and fix the issue
Troubleshooting steps:
```bash
# Check the service configuration
kubectl get svc web-service -n troubleshooting
# Examine the service details to identify selector and port configuration issues
kubectl describe svc web-service -n troubleshooting
```
Solution approach based on typical issues:
1. If the service selector doesn't match any pod labels, fix the service selector:
```yaml
apiVersion: v1
kind: Service
metadata:
  name: web-service
  namespace: troubleshooting
spec:
  selector:
    app: web-app
  ports:
  - port: 80
    targetPort: 80
```
2. Save as `fixed-service.yaml` and apply; a quick verification follows below:
```bash
kubectl apply -f fixed-service.yaml
```
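Once the selector matches the pod labels, the service should have endpoints (assuming pods labeled `app=web-app` are running):
```bash
# A non-empty ENDPOINTS column confirms the service now routes to pods
kubectl get endpoints web-service -n troubleshooting
```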
## Question 8: Pod 'logging-pod' in namespace 'troubleshooting' is consuming excessive CPU resources. Set appropriate CPU and memory limits
Solution:
1. After identifying which container is causing high CPU usage, edit the pod to add resource limits:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: logging-pod
  namespace: troubleshooting
spec:
  containers:
  - name: <container-name>
    # ... existing container configuration ...
    resources:
      limits:
        cpu: 100m
        memory: 50Mi
```
2. Pod resource fields cannot be changed in place, so when `kubectl edit` rejects the change it saves your modified manifest to tmp/<file.yaml>; recreate the pod from that file with a forced replace:
```bash
kubectl replace -f tmp/<file.yaml> --force
```
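You can confirm the limits took effect once the pod is recreated (a sketch; the container index may differ in multi-container pods):
```bash
# Print the configured limits for the first container
kubectl get pod logging-pod -n troubleshooting -o jsonpath='{.spec.containers[0].resources.limits}'
```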
## Question 9: Create a ConfigMap named 'app-config' in namespace 'workloads' containing the following key-value pairs: APP_ENV=production, LOG_LEVEL=info. Then create a Pod named 'config-pod' using 'nginx' image that mounts these configurations as environment variables
@@ -215,6 +235,13 @@ spec:
        configMapKeyRef:
          name: app-config
          key: LOG_LEVEL
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 200m
        memory: 256Mi
```
Save as `config-pod.yaml` and apply:
@@ -239,7 +266,7 @@ metadata:
spec:
  containers:
  - name: mysql
    image: mysql:latest
    env:
    - name: DB_USER
      valueFrom:
@@ -256,6 +283,7 @@ spec:
        secretKeyRef:
          name: db-credentials
          key: password
  restartPolicy: Always
```
Save as `secure-pod.yaml` and apply:
@@ -263,39 +291,41 @@ Save as `secure-pod.yaml` and apply:
kubectl apply -f secure-pod.yaml
```
## Question 11: Create a CronJob named 'log-cleaner' in namespace 'workloads' that runs hourly to clean up log files
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: log-cleaner
  namespace: workloads
spec:
  schedule: "0 * * * *"  # Run every hour at minute 0
  concurrencyPolicy: Forbid  # Skip new job if previous is running
  successfulJobsHistoryLimit: 3  # Keep 3 successful job completions
  failedJobsHistoryLimit: 1  # Keep 1 failed job
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: log-cleaner
            image: busybox
            command: ["/bin/sh", "-c"]
            args:
            - find /var/log -type f -name "*.log" -mtime +7 -delete
          restartPolicy: OnFailure
```
Save this as `log-cleaner-cronjob.yaml` and apply:
```bash
kubectl apply -f log-cleaner-cronjob.yaml
```
You can check the cron job configuration:
```bash
kubectl get cronjob log-cleaner -n workloads -o yaml
``` ```
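You can also trigger the job once without waiting for the schedule (the name `log-cleaner-manual` is arbitrary):
```bash
# Create a one-off Job from the CronJob's template
kubectl create job log-cleaner-manual --from=cronjob/log-cleaner -n workloads
```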
## Question 12: Create a Pod named 'health-pod' in namespace 'workloads' using 'nginx' image with a liveness probe that checks the path /healthz on port 80 every 15 seconds, and a readiness probe that checks port 80 every 10 seconds
@@ -376,21 +406,30 @@ kubectl apply -f cluster-role.yaml
kubectl apply -f cluster-role-binding.yaml
```
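A quick way to confirm the binding behaves as intended, using impersonation:
```bash
# Should print "yes" if jane can list pods cluster-wide
kubectl auth can-i list pods --as jane --all-namespaces
```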
## Question 14: Deploy the Bitnami Nginx chart in the 'web' namespace using Helm
```bash
# Create the namespace if it doesn't exist
kubectl create namespace web
# Add the Bitnami charts repository
helm repo add bitnami https://charts.bitnami.com/bitnami
# Update Helm repositories
helm repo update
# Install Bitnami's Nginx chart with 2 replicas
helm install nginx bitnami/nginx --namespace web --set replicaCount=2
# Verify the deployment
kubectl get pods -n web
kubectl get svc -n web
```
You can inspect the installation and configuration:
```bash
helm list -n web
kubectl get deployment -n web
```
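To double-check the values applied to the release (for example, the replica count set above):
```bash
# Show the user-supplied values for the release
helm get values nginx -n web
```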
## Question 15: Create a CRD (CustomResourceDefinition) for a new resource type 'Backup' in API group 'data.example.com' with version 'v1alpha1' that includes fields 'spec.source' and 'spec.destination'
@@ -460,10 +499,6 @@ kubectl apply -f network-policy.yaml
## Question 17: Create a ClusterIP service named 'internal-app' in namespace 'networking' that routes traffic to pods with label 'app=backend' on port 8080, exposing the service on port 80
Using YAML:
```yaml
apiVersion: v1
@@ -486,13 +521,8 @@ Save as `internal-service.yaml` and apply:
kubectl apply -f internal-service.yaml
```
## Question 18: Create a NodePort service named public-web in namespace networking that will expose the web-frontend deployment to external users.
```yaml
apiVersion: v1
kind: Service
@@ -500,13 +530,16 @@ metadata:
  name: public-web
  namespace: networking
spec:
  type: NodePort
  selector:
    app: web-frontend
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 8080
    nodePort: 30080
```
Save as `loadbalancer-service.yaml` and apply:
@@ -541,24 +574,65 @@ Save as `ingress.yaml` and apply:
kubectl apply -f ingress.yaml
```
## Question 20: Create a simple Kubernetes Job named 'hello-job' that executes a command and completes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: hello-job
  namespace: networking
spec:
  activeDeadlineSeconds: 30
  template:
    spec:
      containers:
      - name: hello
        image: busybox
        command: ["sh", "-c", "echo 'Hello from Kubernetes job!'"]
      restartPolicy: Never
  backoffLimit: 0
```
Save this as `hello-job.yaml` and apply:
```bash
kubectl apply -f hello-job.yaml
```
You can check the job's status and output:
```bash
# Check job status
kubectl get jobs -n networking
# View the pod created by the job
kubectl get pods -n networking -l job-name=hello-job
# Check the logs to see the output message
kubectl logs -n networking -l job-name=hello-job
```
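If the job is still running when you check, you can block until it finishes; 60 seconds is an arbitrary timeout comfortably above the job's 30-second deadline:
```bash
# Wait for the job to reach the Complete condition
kubectl wait --for=condition=complete job/hello-job -n networking --timeout=60s
```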
## Question 21: Work with the Open Container Initiative (OCI) format
```bash
# Pull the image
docker pull nginx:latest
# Save the image to a tarball
docker save nginx:latest -o /tmp/nginx-image.tar
# Create the OCI directory
mkdir -p /root/oci-images
# Extract the tarball to the OCI directory
tar -xf /tmp/nginx-image.tar -C /root/oci-images
# Clean up the tarball
rm /tmp/nginx-image.tar
```
Check the result:
```bash
ls -la /root/oci-images
cat /root/oci-images/index.json # Verify it's in OCI format
```
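If `skopeo` happens to be available (an assumption; it is not required by the task), it can write an OCI layout directly and skip the docker-archive intermediate:
```bash
# Copy the image straight into an OCI layout directory
skopeo copy docker://nginx:latest oci:/root/oci-images:latest
```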

View File

@@ -78,7 +78,7 @@
"id": "3", "id": "3",
"namespace": "storage-test", "namespace": "storage-test",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "As part of optimizing storage resources in the cluster, create a StorageClass named `slow-storage` that will dynamically provision storage resources. \n\nUse the provisioner `kubernetes.io/no-provisioner` for this local storage class. \n\nSet the volumeBindingMode to `WaitForFirstConsumer` to delay volume binding until a pod using the PVC is created. \n\nThis storage class will be used for applications that require optimized local storage performance.", "question": "As part of optimizing storage resources in the cluster, create a StorageClass named `fast-storage` that will dynamically provision storage resources. \n\nUse the provisioner `kubernetes.io/no-provisioner` for this local storage class. \n\nSet the volumeBindingMode to `WaitForFirstConsumer` to delay volume binding until a pod using the PVC is created. \n\nThis storage class will be used for applications that require optimized local storage performance.",
"concepts": ["storage-class", "provisioners", "volume-binding"], "concepts": ["storage-class", "provisioners", "volume-binding"],
"verification": [ "verification": [
{ {
@@ -214,7 +214,7 @@
"id": "8", "id": "8",
"namespace": "troubleshooting", "namespace": "troubleshooting",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "The operations team has reported performance degradation in the cluster. \n\nPod `logging-pod` in namespace `troubleshooting` is consuming excessive CPU resources, affecting other workloads in the cluster. \n\nYour task is to: \n\n1. Identify which container within the pod is causing the high CPU usage \n\n2. Configure appropriate CPU limits for that container to prevent resource abuse while ensuring the application can still function \n\n3. Implement your solution by modifying the pod specification with the necessary resource constraints \n\nEnsure that the pod continues to run successfully after your changes, but with its CPU usage kept within reasonable bounds as defined by the limits you set.", "question": "The operations team has reported performance degradation in the cluster. \n\nPod `logging-pod` in namespace `troubleshooting` is consuming excessive CPU resources, affecting other workloads in the cluster. \n\nYour task is to: \n\n1. Identify which container within the pod is causing the high CPU usage \n\n2. Configure appropriate CPU limits `100m` and memory limits `50Mi` for that container to prevent resource abuse while ensuring the application can still function \n\n3. Implement your solution by modifying the pod specification with the necessary resource constraints \n\nEnsure that the pod continues to run successfully after your changes, but with its CPU usage kept within reasonable bounds as defined by the limits you set.",
"concepts": ["resource-limits", "resource-requests", "cpu-management", "troubleshooting"], "concepts": ["resource-limits", "resource-requests", "cpu-management", "troubleshooting"],
"verification": [ "verification": [
{ {
@@ -226,17 +226,10 @@
},
{
"id": "2",
"description": "CPU and Memory limits are configured correctly",
"verificationScriptFile": "q8_s2_validate_pod_running.sh",
"expectedOutput": "0",
"weightage": 1
}
]
},
@@ -348,7 +341,7 @@
"id": "12", "id": "12",
"namespace": "workloads", "namespace": "workloads",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "To ensure application reliability, the team needs to implement health checking for a critical service. \n\nCreate a Pod named `health-pod` in namespace `workloads` using the `nginx` image with the following health monitoring configuration: \n\n1) A liveness probe that performs an HTTP GET request to the path `/healthz` on port `80` every 15 seconds to determine if the container is alive. If this check fails, Kubernetes will restart the container. \n\n2) A readiness probe that checks if the container is ready to serve traffic by testing if port `80` is open and accepting connections every 10 seconds. \n\nConfigure appropriate initial delay, timeout, and failure threshold values based on best practices.", "question": "To ensure application reliability, the team needs to implement health checking for a critical service. \n\nCreate a Pod named `health-pod` in namespace `workloads` using the `emilevauge/whoami` image with the following health monitoring configuration: \n\n1) A liveness probe that performs an HTTP GET request to the path `/healthz` on port `80` every `15` seconds to determine if the container is alive. If this check fails, Kubernetes will restart the container. \n\n2) A readiness probe that checks if the container is ready to serve traffic by testing if port `80` is open and accepting connections every 10 seconds. \n\nConfigure appropriate initial delay, timeout, and failure threshold values based on best practices.",
"concepts": ["pods", "probes", "liveness-probe", "readiness-probe", "health-checks"], "concepts": ["pods", "probes", "liveness-probe", "readiness-probe", "health-checks"],
"verification": [ "verification": [
{ {
@@ -378,7 +371,7 @@
"id": "13", "id": "13",
"namespace": "cluster-admin", "namespace": "cluster-admin",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "To implement proper access control and security segmentation in the cluster, you need to configure RBAC resources. \n\nFirst, create a `ClusterRole` named `pod-reader` that defines a set of permissions for pod operations. This role should specifically allow three operations on pods: `get` (view individual pods), `watch` (receive notifications about pod changes), and `list` (view collections of pods). \n\nNext, create a `ClusterRoleBinding` named `read-pods` that associates this role with the user `jane` in the namespace `cluster-admin`. \n\nThis binding will grant user `Jane` read-only access to pod resources across all namespaces in the cluster, following the principle of least privilege while allowing her to perform her monitoring duties.", "question": "In the `cluster-admin` namespace, implement proper access control and security segmentation in the cluster by configuring RBAC resources. \n\nFirst, create a `ClusterRole` named `pod-reader` that defines a set of permissions for pod operations. This role should specifically allow three operations on pods: `get` (view individual pods), `watch` (receive notifications about pod changes), and `list` (view collections of pods). \n\nNext, create a `ClusterRoleBinding` named `read-pods` that associates this role with the user `jane` in the namespace `cluster-admin`. \n\nThis binding will grant user `Jane` read-only access to pod resources across all namespaces in the cluster, following the principle of least privilege while allowing her to perform her monitoring duties.",
"concepts": ["rbac", "cluster-role", "cluster-role-binding", "authorization"], "concepts": ["rbac", "cluster-role", "cluster-role-binding", "authorization"],
"verification": [ "verification": [
{ {
@@ -507,7 +500,7 @@
"id": "18", "id": "18",
"namespace": "networking", "namespace": "networking",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "A public-facing web application needs to be exposed to external users. \n\nCreate a NodePort service named `public-web` in namespace `networking` that will expose the `web-frontend` deployment to external users. \n\nConfigure the service to accept external traffic on port `80` and forward it to port `8080` on the deployment`s pods. Set the NodePort to `30080`. \n\nUsing a `NodePort` service will expose the application on a static port on each node in the cluster, making it accessible via any node`s IP address. \n\nEnsure the service selector correctly targets the `web-frontend` deployment pods and that the port configuration is appropriate for a web application. \n\nThis setup will enable external users to access the web application through `<node-ip>:30080`.", "question": "A public-facing web application needs to be exposed to external users. \n\nCreate a NodePort service named `public-web` in namespace `networking` that will expose the `web-frontend` deployment to external users. \n\nConfigure the service to accept external traffic on port `80` and forward it to port `8080` on the deployment's pods. Set the NodePort to `30080`. \n\nUsing a `NodePort` service will expose the application on a static port on each node in the cluster, making it accessible via any node`s IP address. \n\nEnsure the service selector correctly targets the `web-frontend` deployment pods and that the port configuration is appropriate for a web application. \n\nThis setup will enable external users to access the web application through `<node-ip>:30080`.",
"concepts": ["services", "nodeport", "networking", "exposing-apps"], "concepts": ["services", "nodeport", "networking", "exposing-apps"],
"verification": [ "verification": [
{ {
@@ -537,7 +530,7 @@
"id": "19", "id": "19",
"namespace": "networking", "namespace": "networking",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "The API team needs to implement host-based routing for their services. \n\nCreate an Ingress resource named `api-ingress` in namespace `networking` that implements the following routing rule: \n\n- All HTTP traffic for the hostname `api.example.com` should be directed to the service `api-service` on port `80`. \n\nThis Ingress will utilize the cluster`s ingress controller to provide more sophisticated HTTP routing than is possible with Services alone. \n\nMake sure to properly configure the host field with the exact domain name and set up the correct backend service reference.", "question": "The API team needs to implement host-based routing for their services. \n\nCreate an Ingress resource named `api-ingress` in namespace `networking` that implements the following routing rule: \n\n- All HTTP traffic for the hostname `api.example.com` should be directed to the service `api-service` on port `80`. \n\nThis Ingress will utilize the cluster's ingress controller to provide more sophisticated HTTP routing than is possible with Services alone. \n\nMake sure to properly configure the host field with the exact domain name and set up the correct backend service reference.",
"concepts": ["ingress", "networking", "host-based-routing", "http-routing"], "concepts": ["ingress", "networking", "host-based-routing", "http-routing"],
"verification": [ "verification": [
{ {
@@ -567,15 +560,15 @@
"id": "20", "id": "20",
"namespace": "networking", "namespace": "networking",
"machineHostname": "ckad9999", "machineHostname": "ckad9999",
"question": "A one-time configuration backup operation needs to be performed. \n\nCreate a Kubernetes Job named `backup-job` in the `networking` namespace to handle this task. \n\nThe job should create a single pod using the `busybox` image with a command that copies all files from the directory `/etc/config` to the `/backup` directory. \n\nConfigure the job with the following specifications: \n\n- Set `restartPolicy: Never` to ensure that containers are not restarted after completion or failure \n- Set `backoffLimit: 0` so that the job will not be retried if it fails \n\nThis job represents a one-time, batch operation that should either complete successfully or fail without retries, allowing administrators to then investigate any issues manually. \n\nVerify that the job completes successfully and that the files are properly copied to the destination directory.", "question": "Create a simple batch processing task to demonstrate the Job resource type. \n\nCreate a Kubernetes Job named `hello-job` in the `networking` namespace that runs a pod with the `busybox` image. \n\nThe job should execute a single command that prints 'Hello from Kubernetes job!' to standard output, and then completes successfully. \n\nConfigure the job to: \n\n1. Run only once and not be restarted after completion \n2. Have a deadline of 30 seconds (the job will be terminated if it doesn't complete within this time) \n3. Use `Never` as the restart policy for the pod \n\nThis job demonstrates the basic pattern for one-time task execution in Kubernetes.",
"concepts": ["jobs", "batch-processing", "pods", "containers"], "concepts": ["jobs", "batch-processing", "pods", "containers"],
"verification": [ "verification": [
{ {
"id": "1", "id": "1",
"description": "Job is created and completes successfully", "description": "Job is created with correct name",
"verificationScriptFile": "q20_s1_validate_job_completed.sh", "verificationScriptFile": "q20_s1_validate_job_created.sh",
"expectedOutput": "0", "expectedOutput": "0",
"weightage": 3 "weightage": 2
}, },
{ {
"id": "2", "id": "2",
@@ -583,6 +576,13 @@
"verificationScriptFile": "q20_s2_validate_job_config.sh", "verificationScriptFile": "q20_s2_validate_job_config.sh",
"expectedOutput": "0", "expectedOutput": "0",
"weightage": 2 "weightage": 2
},
{
"id": "3",
"description": "Job completes successfully",
"verificationScriptFile": "q20_s3_validate_job_completed.sh",
"expectedOutput": "0",
"weightage": 1
}
]
},

View File

@@ -47,7 +47,7 @@ spec:
    app: web  # Incorrect selector, should be app=web-app
  ports:
  - port: 80
    targetPort: 3030
EOF
echo "Setup complete for Question 7: Created service 'web-service' with incorrect selector"

View File

@@ -0,0 +1,16 @@
#!/bin/bash
# Validate if the pod 'secure-pod' is running in the 'workloads' namespace
POD_NAME="secure-pod"
NAMESPACE="workloads"
# Check if the pod is running
POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}')
if [ "$POD_STATUS" = "Running" ]; then
echo "Success: Pod '$POD_NAME' is running in namespace '$NAMESPACE'"
exit 0
else
echo "Error: Pod '$POD_NAME' is not running in namespace '$NAMESPACE'"
exit 1
fi

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Validate if the pod 'secure-pod' has the correct environment variables from Secret
POD_NAME="secure-pod"
NAMESPACE="workloads"
# Expected secret name and keys
EXPECTED_SECRET="db-credentials"
EXPECTED_USER_KEY="username"
EXPECTED_PASSWORD_KEY="password"
# Extract secret name and key used for DB_USER
DB_USER_SECRET=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.containers[0].env[?(@.name=='DB_USER')].valueFrom.secretKeyRef.name}")
DB_USER_KEY=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.containers[0].env[?(@.name=='DB_USER')].valueFrom.secretKeyRef.key}")
# Extract secret name and key used for DB_PASSWORD
DB_PASSWORD_SECRET=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.containers[0].env[?(@.name=='DB_PASSWORD')].valueFrom.secretKeyRef.name}")
DB_PASSWORD_KEY=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.containers[0].env[?(@.name=='DB_PASSWORD')].valueFrom.secretKeyRef.key}")
# Validate all
if [[ "$DB_USER_SECRET" == "$EXPECTED_SECRET" && "$DB_USER_KEY" == "$EXPECTED_USER_KEY" &&
"$DB_PASSWORD_SECRET" == "$EXPECTED_SECRET" && "$DB_PASSWORD_KEY" == "$EXPECTED_PASSWORD_KEY" ]]; then
echo "✅ Success: Pod '$POD_NAME' has correct secret name and keys for env variables"
exit 0
else
echo "❌ Error: Pod '$POD_NAME' does not have the correct secret configuration"
echo "DB_USER -> Secret: $DB_USER_SECRET, Key: $DB_USER_KEY"
echo "DB_PASSWORD -> Secret: $DB_PASSWORD_SECRET, Key: $DB_PASSWORD_KEY"
exit 1
fi

View File

@@ -0,0 +1,23 @@
#!/bin/bash
CRONJOB_NAME="log-cleaner"
NAMESPACE="workloads"
# Expected values
EXPECTED_COMMAND='["/bin/sh","-c"]'
EXPECTED_ARGS='find /var/log -type f -name "*.log" -mtime +7 -delete'
# Fetch actual values from the CronJob
ACTUAL_COMMAND=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.jobTemplate.spec.template.spec.containers[0].command}")
ACTUAL_ARGS=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.jobTemplate.spec.template.spec.containers[0].args[0]}")
# Compare
if [[ "$ACTUAL_COMMAND" == "$EXPECTED_COMMAND" && "$ACTUAL_ARGS" == "$EXPECTED_ARGS" ]]; then
echo "✅ Success: CronJob '$CRONJOB_NAME' has the correct command and args"
exit 0
else
echo "❌ Error: CronJob '$CRONJOB_NAME' does not have the correct command/args"
echo "Actual command: $ACTUAL_COMMAND"
echo "Actual args: $ACTUAL_ARGS"
exit 1
fi

View File

@@ -0,0 +1,28 @@
#!/bin/bash
CRONJOB_NAME="log-cleaner"
NAMESPACE="workloads"
# Expected values
EXPECTED_CONCURRENCY_POLICY="Forbid"
EXPECTED_SUCCESSFUL_LIMIT="3"
EXPECTED_FAILED_LIMIT="1"
# Fetch actual values from the CronJob spec
ACTUAL_CONCURRENCY_POLICY=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.concurrencyPolicy}")
ACTUAL_SUCCESSFUL_LIMIT=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.successfulJobsHistoryLimit}")
ACTUAL_FAILED_LIMIT=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.failedJobsHistoryLimit}")
# Compare
if [[ "$ACTUAL_CONCURRENCY_POLICY" == "$EXPECTED_CONCURRENCY_POLICY" && \
"$ACTUAL_SUCCESSFUL_LIMIT" == "$EXPECTED_SUCCESSFUL_LIMIT" && \
"$ACTUAL_FAILED_LIMIT" == "$EXPECTED_FAILED_LIMIT" ]]; then
echo "✅ Success: CronJob '$CRONJOB_NAME' has correct concurrency policy and history limits"
exit 0
else
echo "❌ Error: CronJob '$CRONJOB_NAME' has incorrect concurrency policy or history limits"
echo "ConcurrencyPolicy: $ACTUAL_CONCURRENCY_POLICY (expected: $EXPECTED_CONCURRENCY_POLICY)"
echo "SuccessfulJobsHistoryLimit: $ACTUAL_SUCCESSFUL_LIMIT (expected: $EXPECTED_SUCCESSFUL_LIMIT)"
echo "FailedJobsHistoryLimit: $ACTUAL_FAILED_LIMIT (expected: $EXPECTED_FAILED_LIMIT)"
exit 1
fi

View File

@@ -1,51 +1,41 @@
#!/bin/bash
# Validate that the ClusterRoleBinding 'read-pods' correctly associates the 'pod-reader' ClusterRole with user 'jane'
CLUSTERROLEBINDING_NAME="read-pods"
CLUSTERROLE_NAME="pod-reader"
USER_NAME="jane"
# Check if the ClusterRoleBinding exists
if ! kubectl get clusterrolebinding "$CLUSTERROLEBINDING_NAME" > /dev/null 2>&1; then
echo "❌ ClusterRoleBinding '$CLUSTERROLEBINDING_NAME' not found"
exit 1
fi
# Check if the ClusterRoleBinding references the correct ClusterRole
ROLE_REF=$(kubectl get clusterrolebinding "$CLUSTERROLEBINDING_NAME" -o jsonpath='{.roleRef.name}')
if [ "$ROLE_REF" != "$CLUSTERROLE_NAME" ]; then
echo "❌ ClusterRoleBinding '$CLUSTERROLEBINDING_NAME' references role '$ROLE_REF' instead of '$CLUSTERROLE_NAME'"
exit 1
fi
# Check if the roleRef kind is 'ClusterRole'
ROLE_KIND=$(kubectl get clusterrolebinding "$CLUSTERROLEBINDING_NAME" -o jsonpath='{.roleRef.kind}')
if [ "$ROLE_KIND" != "ClusterRole" ]; then
echo "❌ ClusterRoleBinding '$CLUSTERROLEBINDING_NAME' references a '$ROLE_KIND' instead of a 'ClusterRole'"
exit 1
fi
# Check if the ClusterRoleBinding binds to user 'jane'
SUBJECTS=$(kubectl get clusterrolebinding "$CLUSTERROLEBINDING_NAME" -o json)
USER_BOUND=$(echo "$SUBJECTS" | grep -A 2 '"kind": "User"' | grep -q "\"name\": \"$USER_NAME\"" && echo "yes" || echo "no")
if [ "$USER_BOUND" != "yes" ]; then
echo "❌ ClusterRoleBinding '$CLUSTERROLEBINDING_NAME' does not bind to user '$USER_NAME'"
exit 1
fi
# Success
echo "✅ ClusterRoleBinding '$CLUSTERROLEBINDING_NAME' correctly associates ClusterRole '$CLUSTERROLE_NAME' with user '$USER_NAME'"
echo "✅ This binding grants '$USER_NAME' read-only access to pod resources across all namespaces"
exit 0

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Validate that NetworkPolicy 'allow-traffic' has correct pod selector: app=web
NAMESPACE="networking"
POLICY_NAME="allow-traffic"
EXPECTED_KEY="app"
EXPECTED_VALUE="web"
# Check if the NetworkPolicy exists
if ! kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" > /dev/null 2>&1; then
echo "❌ NetworkPolicy '$POLICY_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Fetch the podSelector key and value
ACTUAL_KEY=$(kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.podSelector.matchLabels}" | grep -o '"[^"]*":' | tr -d '":')
ACTUAL_VALUE=$(kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.podSelector.matchLabels.$EXPECTED_KEY}")
# Validate key and value
if [ "$ACTUAL_KEY" != "$EXPECTED_KEY" ] || [ "$ACTUAL_VALUE" != "$EXPECTED_VALUE" ]; then
echo "❌ NetworkPolicy '$POLICY_NAME' has incorrect podSelector"
echo "Expected: $EXPECTED_KEY=$EXPECTED_VALUE"
echo "Found: $ACTUAL_KEY=$ACTUAL_VALUE"
exit 1
fi
# Success
echo "✅ NetworkPolicy '$POLICY_NAME' has correct podSelector: $EXPECTED_KEY=$EXPECTED_VALUE"
exit 0

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Validate that NetworkPolicy 'allow-traffic' has correct ingress rules:
# from pods with label tier=frontend and allows TCP on port 80
NAMESPACE="networking"
POLICY_NAME="allow-traffic"
EXPECTED_FROM_KEY="tier"
EXPECTED_FROM_VALUE="frontend"
EXPECTED_PROTOCOL="TCP"
EXPECTED_PORT="80"
# Check if the NetworkPolicy exists
if ! kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" > /dev/null 2>&1; then
echo "❌ NetworkPolicy '$POLICY_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Validate podSelector in ingress.from
ACTUAL_FROM_VALUE=$(kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.ingress[0].from[0].podSelector.matchLabels.$EXPECTED_FROM_KEY}")
if [ "$ACTUAL_FROM_VALUE" != "$EXPECTED_FROM_VALUE" ]; then
echo "❌ Ingress rule does not match expected podSelector: $EXPECTED_FROM_KEY=$EXPECTED_FROM_VALUE"
echo "Found: $EXPECTED_FROM_KEY=$ACTUAL_FROM_VALUE"
exit 1
fi
# Validate port and protocol
ACTUAL_PORT=$(kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.ingress[0].ports[0].port}")
ACTUAL_PROTOCOL=$(kubectl get networkpolicy "$POLICY_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.ingress[0].ports[0].protocol}")
if [ "$ACTUAL_PORT" != "$EXPECTED_PORT" ] || [ "$ACTUAL_PROTOCOL" != "$EXPECTED_PROTOCOL" ]; then
echo "❌ Ingress rule does not allow expected port/protocol"
echo "Expected: $EXPECTED_PROTOCOL $EXPECTED_PORT"
echo "Found: $ACTUAL_PROTOCOL $ACTUAL_PORT"
exit 1
fi
# Success!
echo "✅ NetworkPolicy '$POLICY_NAME' has correct ingress rules: from pods with label '$EXPECTED_FROM_KEY=$EXPECTED_FROM_VALUE', allowing $EXPECTED_PROTOCOL on port $EXPECTED_PORT"
exit 0

View File

@@ -0,0 +1,27 @@
#!/bin/bash
# Validate that the 'internal-app' ClusterIP service has selector app=backend
SERVICE_NAME="internal-app"
NAMESPACE="networking"
EXPECTED_KEY="app"
EXPECTED_VALUE="backend"
# Check if the service exists
if ! kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" > /dev/null 2>&1; then
echo "❌ Service '$SERVICE_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Fetch selector key and value
ACTUAL_SELECTOR_VALUE=$(kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.selector.$EXPECTED_KEY}")
if [ "$ACTUAL_SELECTOR_VALUE" != "$EXPECTED_VALUE" ]; then
echo "❌ Service selector mismatch"
echo "Expected: $EXPECTED_KEY=$EXPECTED_VALUE"
echo "Found: $EXPECTED_KEY=$ACTUAL_SELECTOR_VALUE"
exit 1
fi
# Success
echo "✅ Service '$SERVICE_NAME' in namespace '$NAMESPACE' has correct selector: $EXPECTED_KEY=$EXPECTED_VALUE"
exit 0

View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Validate that 'internal-app' ClusterIP service routes port 80 -> targetPort 8080 using TCP
SERVICE_NAME="internal-app"
NAMESPACE="networking"
EXPECTED_PORT=80
EXPECTED_TARGET_PORT=8080
EXPECTED_PROTOCOL="TCP"
# Check if the service exists
if ! kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" > /dev/null 2>&1; then
echo "❌ Service '$SERVICE_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Get actual values
ACTUAL_PORT=$(kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.ports[0].port}")
ACTUAL_TARGET_PORT=$(kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.ports[0].targetPort}")
ACTUAL_PROTOCOL=$(kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.ports[0].protocol}")
# Validate port
if [ "$ACTUAL_PORT" != "$EXPECTED_PORT" ]; then
echo "❌ Service port is $ACTUAL_PORT, expected $EXPECTED_PORT"
exit 1
fi
# Validate targetPort
if [ "$ACTUAL_TARGET_PORT" != "$EXPECTED_TARGET_PORT" ]; then
echo "❌ Service targetPort is $ACTUAL_TARGET_PORT, expected $EXPECTED_TARGET_PORT"
exit 1
fi
# Validate protocol
if [ "$ACTUAL_PROTOCOL" != "$EXPECTED_PROTOCOL" ]; then
echo "❌ Service protocol is $ACTUAL_PROTOCOL, expected $EXPECTED_PROTOCOL"
exit 1
fi
# Success
echo "✅ Service '$SERVICE_NAME' correctly maps port $EXPECTED_PORT to targetPort $EXPECTED_TARGET_PORT using $EXPECTED_PROTOCOL"
exit 0

View File

@@ -8,89 +8,41 @@ EXPECTED_SERVICE="api-service"
EXPECTED_PORT=80
# Check if the ingress exists
if ! kubectl get ingress "$INGRESS_NAME" -n "$NAMESPACE" > /dev/null 2>&1; then
  echo "❌ Ingress '$INGRESS_NAME' not found in namespace '$NAMESPACE'"
  exit 1
fi
# Check if the service exists
if ! kubectl get service "$EXPECTED_SERVICE" -n "$NAMESPACE" > /dev/null 2>&1; then
  echo "⚠️ Service '$EXPECTED_SERVICE' not found in namespace '$NAMESPACE'"
fi
# Extract hosts
HOSTS=$(kubectl get ingress "$INGRESS_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.rules[*].host}")
for HOST in $HOSTS; do
  if [ "$HOST" = "$EXPECTED_HOST" ]; then
    # For this host, get service names and ports
    SERVICES=$(kubectl get ingress "$INGRESS_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.rules[?(@.host=='$HOST')].http.paths[*].backend.service.name}")
    PORTS=$(kubectl get ingress "$INGRESS_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.rules[?(@.host=='$HOST')].http.paths[*].backend.service.port.number}")
    INDEX=0
    for SERVICE in $SERVICES; do
      PORT=$(echo $PORTS | awk -v idx=$((INDEX+1)) '{print $idx}')
      echo "🔍 Host '$HOST' routes to service: $SERVICE, port: $PORT"
      if [ "$SERVICE" = "$EXPECTED_SERVICE" ]; then
        if [ "$PORT" = "$EXPECTED_PORT" ] || [ "$PORT" = "http" ]; then
          echo "✅ Found correct backend service and port for host '$HOST'"
          exit 0
        else
          echo "⚠️ Service name matches but port is different: expected $EXPECTED_PORT, got $PORT"
        fi
      fi
      INDEX=$((INDEX+1))
    done
  fi
done
# If we get here, it means validation failed
echo "❌ Ingress does not route traffic to '$EXPECTED_SERVICE' on port $EXPECTED_PORT for host '$EXPECTED_HOST'"
exit 1

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Check if the Job is created with the correct name in the networking namespace
JOB_NAME="hello-job"
NAMESPACE="networking"
# Check if the job exists
if kubectl get job ${JOB_NAME} -n ${NAMESPACE} &> /dev/null; then
echo "✅ Job '${JOB_NAME}' exists in namespace '${NAMESPACE}'"
exit 0
else
echo "❌ Job '${JOB_NAME}' does not exist in namespace '${NAMESPACE}'"
exit 1
fi

View File

@@ -2,7 +2,7 @@
# Validate that the Kubernetes Job 'hello-job' in namespace 'networking' has the correct configuration
NAMESPACE="networking"
JOB_NAME="hello-job"
EXPECTED_IMAGE="busybox"
EXPECTED_RESTART_POLICY="Never"
EXPECTED_BACKOFF_LIMIT=0
@@ -14,6 +14,15 @@ if [ $? -ne 0 ]; then
exit 1
fi
# Check for activeDeadlineSeconds set to 30
DEADLINE=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.spec.activeDeadlineSeconds}')
if [[ "$DEADLINE" == "30" ]]; then
echo "✅ Job has correct activeDeadlineSeconds value: 30"
else
echo "❌ Job has incorrect activeDeadlineSeconds value: ${DEADLINE:-not set}"
exit 1
fi
# Check the job's backoffLimit
BACKOFF_LIMIT=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.spec.backoffLimit}' 2>/dev/null)

View File

@@ -0,0 +1,56 @@
#!/bin/bash
# Check if the Job completes successfully
JOB_NAME="hello-job"
NAMESPACE="networking"
# Check if the job exists
if ! kubectl get job ${JOB_NAME} -n ${NAMESPACE} &> /dev/null; then
echo "❌ Job '${JOB_NAME}' does not exist in namespace '${NAMESPACE}'"
exit 1
fi
# Check if the job has completed
STATUS=$(kubectl get job ${JOB_NAME} -n ${NAMESPACE} -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}')
if [[ "$STATUS" == "True" ]]; then
echo "✅ Job '${JOB_NAME}' has completed successfully"
# Check if the job's pod generated the expected output
POD_NAME=$(kubectl get pods -n ${NAMESPACE} -l job-name=${JOB_NAME} -o jsonpath='{.items[0].metadata.name}')
if [ -n "$POD_NAME" ]; then
LOG=$(kubectl logs ${POD_NAME} -n ${NAMESPACE})
if [[ "$LOG" == *"Hello from Kubernetes job!"* ]]; then
echo "✅ Job pod produced expected output: '$LOG'"
exit 0
else
echo "❌ Job pod did not produce expected output. Found: '$LOG'"
exit 1
fi
else
echo "⚠️ Could not find pod for job, but job shows as complete"
exit 0
fi
else
# If not completed, check if it's still running (could be normal)
ACTIVE=$(kubectl get job ${JOB_NAME} -n ${NAMESPACE} -o jsonpath='{.status.active}')
if [[ "$ACTIVE" == "1" ]]; then
echo "⚠️ Job '${JOB_NAME}' is still running. Please wait for it to complete."
exit 1
else
# If not active and not complete, check for failure
FAILED=$(kubectl get job ${JOB_NAME} -n ${NAMESPACE} -o jsonpath='{.status.failed}')
if [[ -n "$FAILED" && "$FAILED" -gt 0 ]]; then
echo "❌ Job '${JOB_NAME}' has failed with $FAILED failures"
# Try to get logs from the failed pod for debugging
FAILED_POD=$(kubectl get pods -n ${NAMESPACE} -l job-name=${JOB_NAME} -o jsonpath='{.items[0].metadata.name}')
if [ -n "$FAILED_POD" ]; then
echo "❌ Error logs: $(kubectl logs ${FAILED_POD} -n ${NAMESPACE})"
fi
exit 1
else
echo "❌ Job '${JOB_NAME}' has not completed and is not running"
exit 1
fi
fi
fi
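
To avoid tripping the "still running" branch, it can help to wait for completion before validating — a sketch, with an arbitrary 60s timeout:

```bash
kubectl wait --for=condition=complete job/hello-job -n networking --timeout=60s
kubectl logs -n networking -l job-name=hello-job
```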

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# Validate if the StorageClass 'fast-storage' has the correct volumeBindingMode
BINDING_MODE=$(kubectl get storageclass fast-storage -o jsonpath='{.volumeBindingMode}' 2>/dev/null)
if [ "$BINDING_MODE" = "WaitForFirstConsumer" ]; then
  echo "Success: StorageClass 'fast-storage' has the correct volumeBindingMode (WaitForFirstConsumer)"

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Validate if PersistentVolumeClaim has correct access mode
ACCESS_MODE=$(kubectl get pvc pvc-app -n storage-test -o jsonpath='{.spec.accessModes[0]}' 2>/dev/null)
if [ "$ACCESS_MODE" = "ReadWriteOnce" ]; then
echo "Success: PersistentVolumeClaim 'pvc-app' has the correct access mode (ReadWriteOnce)"
exit 0
else
echo "Error: PersistentVolumeClaim 'pvc-app' does not have the correct access mode. Found: '$ACCESS_MODE', Expected: 'ReadWriteOnce'"
exit 1
fi

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Validate if PersistentVolumeClaim uses correct StorageClass
STORAGE_CLASS=$(kubectl get pvc pvc-app -n storage-test -o jsonpath='{.spec.storageClassName}' 2>/dev/null)
if [ "$STORAGE_CLASS" = "fast-storage" ]; then
echo "Success: PersistentVolumeClaim 'pvc-app' uses correct StorageClass (fast-storage)"
exit 0
else
echo "Error: PersistentVolumeClaim 'pvc-app' does not have the correct storage class. Found: '$STORAGE_CLASS', Expected: 'fast-storage'"
exit 1
fi
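
A PVC that would satisfy both of these validators could look like the following — the 1Gi request is an assumption, since the scripts only check the access mode and storage class:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-app
  namespace: storage-test
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: fast-storage
  resources:
    requests:
      storage: 1Gi   # assumed size; not checked by the validators
EOF
```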

View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Validate that the deployment 'broken-app' in the 'troubleshooting' namespace is available
# and that its container image is nginx
# Check whether the deployment reports an Available condition
DEPLOYMENT_STATUS=$(kubectl get deployment broken-app -n troubleshooting -o jsonpath='{.status.conditions[?(@.type=="Available")].status}')
if [ "$DEPLOYMENT_STATUS" != "True" ]; then
echo "Error: The deployment 'broken-app' is not running"
exit 1
fi
IMAGE=$(kubectl get deployment broken-app -n troubleshooting -o jsonpath='{.spec.template.spec.containers[0].image}' | cut -d':' -f1)
if [ "$IMAGE" == "nginx" ]; then
echo "Success: The container image is correct"
exit 0
else
echo "Error: The container image is not correct"
exit 1
fi
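
If the deployment is failing because of a bad image, a fix along these lines would make the check pass (this assumes the container is named nginx; substitute the actual container name):

```bash
kubectl -n troubleshooting set image deployment/broken-app nginx=nginx
kubectl -n troubleshooting rollout status deployment/broken-app --timeout=60s
```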

View File

@@ -1,30 +1,22 @@
#!/bin/bash
# Validate if the pod 'sidecar-pod' exists in the 'troubleshooting' namespace and has containers named 'nginx' and 'sidecar'
POD_EXISTS=$(kubectl get pod sidecar-pod -n troubleshooting -o name 2>/dev/null)

if [ -z "$POD_EXISTS" ]; then
  echo "Error: Pod 'sidecar-pod' does not exist in namespace 'troubleshooting'"
  exit 1
fi

# Verify container names
NGINX_CONTAINER=$(kubectl get pod sidecar-pod -n troubleshooting -o jsonpath='{.spec.containers[?(@.name=="nginx")].name}' 2>/dev/null)
SIDECAR_CONTAINER=$(kubectl get pod sidecar-pod -n troubleshooting -o jsonpath='{.spec.containers[?(@.name=="sidecar")].name}' 2>/dev/null)

if [ -n "$NGINX_CONTAINER" ] && [ -n "$SIDECAR_CONTAINER" ]; then
  echo "Success: Pod has both 'nginx' and 'sidecar' containers"
  exit 0
else
  echo "Error: Pod does not have the required container names ('nginx' and 'sidecar')"
  echo "Found containers with names: $(kubectl get pod sidecar-pod -n troubleshooting -o jsonpath='{.spec.containers[*].name}' 2>/dev/null)"
  exit 1
fi
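
A pod that passes this check could be defined as follows; the images and the sidecar command are assumptions, since the script validates only the container names:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: sidecar-pod
  namespace: troubleshooting
spec:
  containers:
  - name: nginx
    image: nginx      # assumed image; only the name is checked
  - name: sidecar
    image: busybox    # assumed image; only the name is checked
    command: ['sh', '-c', 'sleep 3600']
EOF
```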

View File

@@ -1,6 +1,7 @@
#!/bin/bash
# Validate that the 'logging-pod' is running in namespace 'troubleshooting'
NAMESPACE="troubleshooting"
POD_NAME="logging-pod"
@@ -12,29 +13,23 @@ if [ -z "$POD_STATUS" ]; then
  exit 1
fi

# Validate that CPU limit 100m and memory limit 50Mi are set for the container
# Check if the pod is running
if [ "$POD_STATUS" != "Running" ]; then
  echo "❌ Pod '$POD_NAME' exists but is not running (current status: $POD_STATUS)"
  exit 1
fi

# Check if the container has the correct CPU and memory limits
CONTAINER_NAME=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].name}')
CPU_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath="{.spec.containers[?(@.name==\"$CONTAINER_NAME\")].resources.limits.cpu}")
MEMORY_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath="{.spec.containers[?(@.name==\"$CONTAINER_NAME\")].resources.limits.memory}")

if [ "$CPU_LIMIT" != "100m" ] || [ "$MEMORY_LIMIT" != "50Mi" ]; then
  echo "Container '$CONTAINER_NAME' does not have the correct CPU and memory limits"
  exit 1
fi

echo "✅ Container '$CONTAINER_NAME' has the correct CPU and memory limits"
exit 0
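
The pod this script expects would carry limits like the following — a sketch; the container name, image, and command are assumptions (the script reads containers[0] and checks only the limits):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: logging-pod
  namespace: troubleshooting
spec:
  containers:
  - name: logger            # assumed name; the script uses containers[0]
    image: busybox          # assumed image
    command: ['sh', '-c', 'while true; do echo log; sleep 5; done']
    resources:
      limits:
        cpu: 100m
        memory: 50Mi
EOF
```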

View File

@@ -1,60 +0,0 @@
#!/bin/bash
# Validate that CPU usage for 'logging-pod' in namespace 'troubleshooting' is within acceptable limits
NAMESPACE="troubleshooting"
POD_NAME="logging-pod"
# Setting a reasonable CPU usage threshold (in millicores)
CPU_THRESHOLD=800
# Check if the pod exists
kubectl get pod $POD_NAME -n $NAMESPACE > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'"
exit 1
fi
# Get CPU limits from the pod
CONTAINERS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[*].name}')
echo "🔍 Checking CPU usage for pod '$POD_NAME'..."
# Use kubectl top to get current CPU usage
CPU_USAGE=$(kubectl top pod $POD_NAME -n $NAMESPACE --no-headers 2>/dev/null | awk '{print $2}')
# Handle case where metrics-server might not be available
if [ -z "$CPU_USAGE" ]; then
echo "⚠️ Cannot measure actual CPU usage (metrics-server may not be available)"
echo "✅ Assuming CPU usage is acceptable since pod is running with limits"
exit 0
fi
# Extract numeric value from CPU usage (e.g., "156m" -> 156)
CPU_VALUE=$(echo $CPU_USAGE | sed 's/[^0-9]*//g')
if [ -z "$CPU_VALUE" ]; then
echo "⚠️ Cannot parse CPU usage value: $CPU_USAGE"
echo "✅ Assuming CPU usage is acceptable since pod is running with limits"
exit 0
fi
echo "📊 Current CPU usage: ${CPU_VALUE}m"
# Check against threshold
if [ $CPU_VALUE -gt $CPU_THRESHOLD ]; then
echo "❌ CPU usage (${CPU_VALUE}m) exceeds threshold (${CPU_THRESHOLD}m)"
exit 1
fi
# Check if all containers have CPU limits set
for CONTAINER in $CONTAINERS; do
CPU_LIMIT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath="{.spec.containers[?(@.name==\"$CONTAINER\")].resources.limits.cpu}")
if [ -z "$CPU_LIMIT" ]; then
echo "⚠️ Container '$CONTAINER' does not have CPU limits set"
else
echo "✅ Container '$CONTAINER' has CPU limits: $CPU_LIMIT"
fi
done
echo "✅ CPU usage is within acceptable limits"
exit 0

View File

@@ -1,6 +1,6 @@
# CKAD-002 Lab Answers

This document contains solutions for all questions in the CKAD-002 lab.

## Question 1: Core Concepts
@@ -509,203 +509,457 @@ spec:
EOF
```

## Question 11: Security Context

Create a Pod with security configurations:

```bash
# Create namespace
kubectl create namespace security

# Create secure pod
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: security
---
apiVersion: v1
kind: Pod
metadata:
  name: secure-app
  namespace: security
spec:
  securityContext:
    runAsUser: 1000
    runAsNonRoot: true
  containers:
  - name: nginx
    image: nginx:alpine
    securityContext:
      capabilities:
        drop: ["ALL"]
      readOnlyRootFilesystem: true
      runAsNonRoot: true
EOF
```

## Question 12: Docker Basics

Create a simple Docker image and run it:

```bash
# Create the Dockerfile
cat > /tmp/Dockerfile << 'EOF'
FROM nginx:alpine
COPY index.html /usr/share/nginx/html/
EXPOSE 80
EOF

# Create the HTML file
cat > /tmp/index.html << 'EOF'
<!DOCTYPE html>
<html>
<body>
<h1>Hello from CKAD Docker Question!</h1>
</body>
</html>
EOF

# Build the Docker image
docker build -t my-nginx:v1 -f /tmp/Dockerfile /tmp

# Run the container
docker run -d --name my-web -p 8080:80 my-nginx:v1

# Verify the container is running
docker ps | grep my-web
```
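
To confirm the container actually serves the page on the published port (assuming curl is available on the host):

```bash
curl -s http://localhost:8080 | grep "Hello from CKAD Docker Question!"
```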

## Question 13: Jobs

Create a Job with specific configurations:

```bash
# Create namespace
kubectl create namespace jobs

# Create Job (quoted heredoc so $(seq) and $i reach the container unexpanded)
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: jobs
---
apiVersion: batch/v1
kind: Job
metadata:
  name: data-processor
  namespace: jobs
spec:
  backoffLimit: 4
  activeDeadlineSeconds: 30
  template:
    spec:
      containers:
      - name: processor
        image: busybox
        command: ['sh', '-c', 'for i in $(seq 1 5); do echo Processing item $i; sleep 2; done']
      restartPolicy: Never
EOF
```
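
Optionally, wait for the job and inspect its output (the 60s timeout comfortably covers the ~10s run and the 30s deadline):

```bash
kubectl wait --for=condition=complete job/data-processor -n jobs --timeout=60s
kubectl logs -n jobs -l job-name=data-processor
```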

## Question 14: Init Containers

Create a Pod with init container and service:

```bash
# Create namespace
kubectl create namespace init-containers

# Create Pod with init container and Service
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: init-containers
---
apiVersion: v1
kind: Service
metadata:
  name: myservice
  namespace: init-containers
spec:
  selector:
    app: myservice
  ports:
  - port: 80
---
apiVersion: v1
kind: Pod
metadata:
  name: app-with-init
  namespace: init-containers
spec:
  containers:
  - name: main-container
    image: nginx
    volumeMounts:
    - name: log-volume
      mountPath: /shared
  initContainers:
  - name: sidecar-container
    image: busybox
    command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done']
    volumeMounts:
    - name: log-volume
      mountPath: /shared
  volumes:
  - name: log-volume
    emptyDir: {}
EOF
```
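
The pod sits in Init:0/1 until `myservice` resolves in cluster DNS; since the Service is created in the same apply, it should reach Running shortly:

```bash
kubectl get pod app-with-init -n init-containers
kubectl logs app-with-init -n init-containers -c sidecar-container
```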

## Question 15: Helm Basics
The task is to perform basic Helm operations including creating a namespace, adding a repository, installing a chart, and saving release notes.
```bash
# Step 1: Create the namespace
kubectl create namespace helm-basics
# Step 2: Add the Bitnami repository
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# Step 3: Install the nginx chart
helm install nginx-release bitnami/nginx --namespace helm-basics
# Step 4: Save the release notes to a file
helm get notes nginx-release --namespace helm-basics > /tmp/release-notes.txt
```
These commands:
1. Create a namespace called `helm-basics`
2. Add the Bitnami Helm chart repository and update it to get the latest charts
3. Install the nginx chart from Bitnami in the helm-basics namespace with the release name "nginx-release"
4. Save the release notes to /tmp/release-notes.txt using the `helm get notes` command
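
A quick sanity check that the release and the notes file are in place:

```bash
helm list -n helm-basics
helm status nginx-release -n helm-basics
head /tmp/release-notes.txt
```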
## Question 16: Health Checks
Create a Pod with multiple health probes:
```bash
# Create namespace
kubectl create namespace health-checks
# Create Pod with startup, liveness, and readiness probes
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: health-checks
---
apiVersion: v1
kind: Pod
metadata:
  name: health-check-pod
  namespace: health-checks
spec:
  containers:
  - name: nginx
    image: nginx
    startupProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 10
      periodSeconds: 3
      failureThreshold: 3
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 15
      periodSeconds: 5
      failureThreshold: 3
    readinessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 3
      failureThreshold: 3
EOF
```
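
Once the startup probe succeeds the pod should report READY 1/1; the probe configuration is visible in the describe output:

```bash
kubectl get pod health-check-pod -n health-checks
kubectl describe pod health-check-pod -n health-checks | grep -iE 'liveness|readiness|startup'
```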
## Question 17: Pod Lifecycle
Create a Pod with lifecycle hooks:
```bash
# Create namespace
kubectl create namespace pod-lifecycle
# Create Pod with lifecycle hooks
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: pod-lifecycle
---
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-pod
  namespace: pod-lifecycle
spec:
  containers:
  - name: nginx
    image: nginx
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo 'Welcome to the pod!' > /usr/share/nginx/html/welcome.txt"]
      preStop:
        exec:
          command: ["/bin/sh", "-c", "sleep 10"]
  terminationGracePeriodSeconds: 30
EOF
```
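
The postStart hook can be verified by reading the file it writes:

```bash
kubectl exec lifecycle-pod -n pod-lifecycle -- cat /usr/share/nginx/html/welcome.txt
```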
## Question 18: Custom Resource Definitions
Create a CRD and a custom resource:
```bash
# Create namespace
kubectl create namespace crd-demo
# Create the Custom Resource Definition (CRD)
cat <<EOF | kubectl apply -f -
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: applications.training.ckad.io
spec:
  group: training.ckad.io
  names:
    kind: Application
    plural: applications
    singular: application
    shortNames:
    - app
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            required: ["image", "replicas"]
            properties:
              image:
                type: string
              replicas:
                type: integer
                minimum: 1
EOF
# Create the Custom Resource
cat <<EOF | kubectl apply -f -
apiVersion: training.ckad.io/v1
kind: Application
metadata:
  name: my-app
  namespace: crd-demo
spec:
  image: nginx:1.19.0
  replicas: 3
EOF
# Verify the resources
kubectl get crd applications.training.ckad.io
kubectl get application -n crd-demo
```
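
Individual fields of the custom resource can be pulled with jsonpath, which is roughly what the per-field validators do:

```bash
kubectl get application my-app -n crd-demo -o jsonpath='{.spec.image}'
kubectl get application my-app -n crd-demo -o jsonpath='{.spec.replicas}'
```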
## Question 19: Custom Column Output
Use kubectl custom columns to extract pod information:
```bash
# Create namespace (should already be set up by the test environment)
kubectl create namespace custom-columns-demo
# First, let's see what pods we have to work with
kubectl get pods -n custom-columns-demo
# Create the basic custom column output showing pods from all namespaces
# Format: POD NAME, NAMESPACE, and PRIMARY CONTAINER IMAGE
kubectl get pods -A -o custom-columns="POD:.metadata.name,NAMESPACE:.metadata.namespace,IMAGE:.spec.containers[0].image" > /tmp/pod-images.txt
# Verify the basic output
cat /tmp/pod-images.txt
# For the second requirement, we need to handle multi-container pods
# Option 1: Using jsonpath to get comma-separated list of all container images
kubectl get pods -A -o jsonpath="{range .items[*]}{.metadata.name},{.metadata.namespace},{range .spec.containers[*]}{.image}{','}{end}{'\n'}{end}" > /tmp/all-container-images.txt
# Option 2: Using a more advanced approach with a script
cat <<'EOF' > /tmp/get-pod-images.sh
#!/bin/bash
echo "POD,NAMESPACE,IMAGES"
kubectl get pods -A -o json | jq -r '.items[] | .metadata.name + "," + .metadata.namespace + "," + (.spec.containers | map(.image) | join(","))'
EOF
chmod +x /tmp/get-pod-images.sh
/tmp/get-pod-images.sh > /tmp/all-container-images.txt
# Verify the multi-container output
cat /tmp/all-container-images.txt
# Check that our outputs contain the expected data
grep "multi-container-pod" /tmp/all-container-images.txt
```
This solution creates two output files:
1. `/tmp/pod-images.txt` - Shows all pods with their names, namespaces, and primary container images
2. `/tmp/all-container-images.txt` - Shows all pods with all container images, properly handling multi-container pods
## Question 20: Pod Configuration
Create a Pod that uses ConfigMaps and Secrets for configuration:
```bash
# Create namespace
kubectl create namespace pod-configuration
# Create ConfigMap with database connection settings
kubectl create configmap app-config -n pod-configuration \
--from-literal=DB_HOST=db.example.com \
--from-literal=DB_PORT=5432
# Verify the ConfigMap was created correctly
kubectl get configmap app-config -n pod-configuration -o yaml
# Create Secret with API credentials
kubectl create secret generic app-secret -n pod-configuration \
--from-literal=API_KEY=my-api-key \
--from-literal=API_SECRET=my-api-secret
# Verify the Secret was created (note values will be base64 encoded)
kubectl get secret app-secret -n pod-configuration -o yaml
# Create Pod with environment variables and volume mounts
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: config-pod
  namespace: pod-configuration
spec:
  containers:
  - name: nginx
    image: nginx
    # Direct environment variables
    env:
    - name: APP_ENV
      value: production
    - name: DEBUG
      value: "false"
    # Environment variables from ConfigMap
    - name: DB_HOST
      valueFrom:
        configMapKeyRef:
          name: app-config
          key: DB_HOST
    - name: DB_PORT
      valueFrom:
        configMapKeyRef:
          name: app-config
          key: DB_PORT
    # Environment variables from Secret
    - name: API_KEY
      valueFrom:
        secretKeyRef:
          name: app-secret
          key: API_KEY
    - name: API_SECRET
      valueFrom:
        secretKeyRef:
          name: app-secret
          key: API_SECRET
    # Mount ConfigMap as a volume
    volumeMounts:
    - name: config-volume
      mountPath: /etc/app-config
  volumes:
  - name: config-volume
    configMap:
      name: app-config
EOF

# Verify the Pod has been created
kubectl get pod config-pod -n pod-configuration

# Verify environment variables within the Pod
kubectl exec config-pod -n pod-configuration -- env | grep -E 'APP_ENV|DEBUG|DB_|API_'

# Verify the ConfigMap is mounted as a volume
kubectl exec config-pod -n pod-configuration -- ls -la /etc/app-config
kubectl exec config-pod -n pod-configuration -- cat /etc/app-config/DB_HOST
```

This solution demonstrates:

1. Creating a ConfigMap with database connection settings
2. Creating a Secret with API credentials
3. Configuring a Pod with:
   - Direct environment variables (APP_ENV, DEBUG)
   - Environment variables from ConfigMap (DB_HOST, DB_PORT)
   - Environment variables from Secret (API_KEY, API_SECRET)
   - Mounting the ConfigMap as a volume at /etc/app-config
4. Verification commands to ensure everything is working correctly

View File

@@ -3,7 +3,7 @@
    {
      "id": "1",
      "namespace": "core-concepts",
      "machineHostname": "ckad9999",
      "question": "Create a namespace called `core-concepts` and a pod with image `nginx` called `nginx-pod` in this namespace.\n\nThe pod should have the following labels: `app=web`, `env=prod`.",
      "concepts": ["core-concepts", "pods", "namespaces", "labels"],
      "verification": [
@@ -40,7 +40,7 @@
    {
      "id": "2",
      "namespace": "multi-container",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `multi-container-pod` in the `multi-container` namespace with \ntwo containers:\n1. Container 1 - \nName: `main-container`, \nImage: `nginx`\n2. Container 2 - \nName: `sidecar-container`, \nImage: `busybox`, \nCommand: `['sh', '-c', 'while true; do echo $(date) >> /var/log/app.log; sleep 5; done']`\n\nCreate a shared volume named `log-volume` that both containers can access. Mount this volume at `/var/log` in both containers. Ensure the namespace exists before creating the pod.",
      "concepts": ["multi-container-pods", "volumes", "namespaces"],
      "verification": [
@@ -77,7 +77,7 @@
    {
      "id": "3",
      "namespace": "pod-design",
      "machineHostname": "ckad9999",
      "question": "Create a Deployment in the `pod-design` namespace with the following specifications:\n- Name: `frontend`\n- Replicas: `3`\n- Image: `nginx:1.19.0`\n- Labels: `app=frontend, tier=frontend`\n- Pod Labels: same as deployment labels\n\nThen create a service `frontend-svc` that exposes the deployment on port `80`, targeting container port `80`, and is of type `ClusterIP`. Ensure the namespace exists before creating the resources.",
      "concepts": ["deployments", "services", "labels", "selectors"],
      "verification": [
@@ -114,7 +114,7 @@
    {
      "id": "4",
      "namespace": "configuration",
      "machineHostname": "ckad9999",
      "question": "Create a ConfigMap named `app-config` in the `configuration` namespace \nwith the following data:\n`DB_HOST=mysql`\n`DB_PORT=3306`\n`DB_NAME=myapp`\n\nThen create a Secret named `app-secret` \nwith the following data:\n`DB_USER=admin`\n`DB_PASSWORD=s3cr3t`\n\nFinally, create a Pod named `app-pod` using the `nginx` image that uses both the ConfigMap and Secret. \n\nMount the ConfigMap as environment variables and the Secret as a volume at `/etc/app-secret`. Ensure the namespace exists before creating the resources.",
      "concepts": ["configmaps", "secrets", "environment-variables", "volumes"],
      "verification": [
@@ -158,7 +158,7 @@
    {
      "id": "5",
      "namespace": "observability",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `probes-pod` in the `observability` namespace using the image `nginx`. Configure the following probes:\n\n1. Liveness probe: HTTP GET request to path `/healthz` on port `80`, with initialDelaySeconds=`10` and periodSeconds=`5`\n\n2. Readiness probe: HTTP GET request to path `/` on port `80`, with initialDelaySeconds=`5` and periodSeconds=`3`\n\nAlso, configure the pod with resource requests of CPU=`100m` and memory=`128Mi`, and resource limits of CPU=`200m` and memory=`256Mi`. \nEnsure the namespace exists before creating the pod.",
      "concepts": ["probes", "liveness", "readiness", "resource-limits"],
      "verification": [
@@ -202,7 +202,7 @@
    {
      "id": "6",
      "namespace": "services",
      "machineHostname": "ckad9999",
      "question": "Create a Deployment named `web-app` in the `services` namespace with 3 replicas using the image `nginx:alpine`. \nLabel the pods with `app=web`.\n\nExpose the deployment with three different services:\n1. A ClusterIP service named `web-svc-cluster` on port `80`\n2. A NodePort service named `web-svc-nodeport` on port `80`, using nodePort `30080`\n3. A LoadBalancer service named `web-svc-lb` on port `80`\n\nEnsure the namespace exists before creating the resources.",
      "concepts": ["services", "deployments", "clusterip", "nodeport", "loadbalancer"],
      "verification": [
@@ -246,7 +246,7 @@
    {
      "id": "7",
      "namespace": "state",
      "machineHostname": "ckad9999",
      "question": "Set up persistent storage for a database application in the `state` namespace:\n\n1. Create a PersistentVolume named `db-pv` with storage capacity of `1Gi`, access mode `ReadWriteOnce`, hostPath type pointing to `/mnt/data`, and reclaim policy `Retain`\n2. Create a PersistentVolumeClaim named `db-pvc` that requests `500Mi` storage with access mode `ReadWriteOnce`\n3. Create a Pod named `db-pod` using the `mysql:5.7` image that mounts the PVC at `/var/lib/mysql`\n4. Set the following environment variables for the pod: \n`MYSQL_ROOT_PASSWORD=rootpassword`, \n`MYSQL_DATABASE=mydb`, \n`MYSQL_USER=myuser`, \n`MYSQL_PASSWORD=mypassword`\n\nEnsure the namespace exists before creating the resources.",
      "concepts": ["persistent-volumes", "persistent-volume-claims", "volumes", "environment-variables"],
      "verification": [
@@ -297,7 +297,7 @@
    {
      "id": "8",
      "namespace": "pod-design",
      "machineHostname": "ckad9999",
      "question": "Create a CronJob in the `pod-design` namespace with the following specifications:\n\n1. Name: `backup-job`\n2. Schedule: Every 5 minutes\n3. Container image: `busybox`\n4. Command: `['sh', '-c', 'echo Backup started: $(date); sleep 30; echo Backup completed: $(date)']`\n5. Configure the job with a restart policy of `OnFailure`\n6. Set a deadline of `100` seconds for the job to complete\n\nEnsure the namespace exists before creating the resource.",
      "concepts": ["cronjobs", "jobs", "scheduling"],
      "verification": [
@@ -334,33 +334,26 @@
    {
      "id": "9",
      "namespace": "troubleshooting",
      "machineHostname": "ckad9999",
      "question": "There is a deployment named `broken-deployment` in the `troubleshooting` namespace that is not functioning correctly. The deployment should have `3` replicas of `nginx:1.19` pods, but it's failing.\n\nFind and fix the issue(s) with the deployment. Possible issues might include:\n- Incorrect image name or tag\n- Resource constraints that can't be satisfied\n- Configuration problems with the pod template\n- Network policy restrictions\n\nEnsure the deployment functions correctly with 3 replicas running.",
      "concepts": ["troubleshooting", "deployments", "debugging"],
      "verification": [
        {
          "id": "1",
          "description": "Deployment has 3 replicas",
          "verificationScriptFile": "q9_s2_validate_deployment_replicas.sh",
          "expectedOutput": "0",
          "weightage": 2
        },
        {
          "id": "2",
          "description": "All pods are in running state",
          "verificationScriptFile": "q9_s3_validate_pods_running.sh",
          "expectedOutput": "0",
          "weightage": 3
        },
        {
          "id": "3",
          "description": "Pods are using correct image",
          "verificationScriptFile": "q9_s4_validate_pods_image.sh",
          "expectedOutput": "0",
@@ -371,7 +364,7 @@
    {
      "id": "10",
      "namespace": "networking",
      "machineHostname": "ckad9999",
      "question": "Create a NetworkPolicy in the `networking` namespace that restricts access to the `secure-db` pod with label `app=db` as follows:\n\n1. Allow ingress traffic only from pods with the label `role=frontend` on port `5432`\n2. Allow egress traffic only to pods with the label `role=monitoring` on port `8080`\n3. Deny all other traffic\n\nThen create three pods for testing:\n- A pod named `secure-db` with label `app=db` using image `postgres:12`\n- A pod named `frontend` with label `role=frontend` using image `nginx`\n- A pod named `monitoring` with label `role=monitoring` using image `nginx`\n\nEnsure the namespace exists before creating the resources.",
      "concepts": ["network-policies", "pods", "labels", "security"],
      "verification": [
@@ -415,7 +408,7 @@
    {
      "id": "11",
      "namespace": "security",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `secure-app` in the `security` namespace with the following security configurations:\n\n1. Run as non-root user (UID: `1000`)\n2. Set security context to drop all capabilities\n3. Set `readOnlyRootFilesystem` to true\n4. Add a security context to the container to run as non-root\n5. Use the `nginx:alpine` image\n\nEnsure the namespace exists before creating the pod.",
      "concepts": ["security-context", "pod-security", "container-security"],
      "verification": [
@@ -451,36 +444,36 @@
    },
    {
      "id": "12",
      "namespace": "docker-basics",
      "machineHostname": "ckad9999",
      "question": "Create a simple Docker image and run it:\n\n1. Create a file `/tmp/Dockerfile` with these contents:\n`\nFROM nginx:alpine\nCOPY index.html /usr/share/nginx/html/\nEXPOSE 80\n`\n\n2. Create a file `/tmp/index.html` with this content:\n`\n<!DOCTYPE html>\n<html>\n<body>\n<h1>Hello from CKAD Docker Question!</h1>\n</body>\n</html>\n`\n\n3. Build the Docker image with tag `my-nginx:v1`\n\n4. Run a container from this image with name `my-web` and publish port 80 to 8080 on the host\n\nNote: You'll need to use the Docker command line tools for this task.",
      "concepts": ["docker", "images", "containers"],
      "verification": [
        {
          "id": "1",
          "description": "Dockerfile exists with correct content",
          "verificationScriptFile": "q12_s1_validate_dockerfile.sh",
          "expectedOutput": "0",
          "weightage": 1
        },
        {
          "id": "2",
          "description": "index.html exists with proper content",
          "verificationScriptFile": "q12_s2_validate_html.sh",
          "expectedOutput": "0",
          "weightage": 1
        },
        {
          "id": "3",
          "description": "Docker image is built correctly",
          "verificationScriptFile": "q12_s3_validate_image.sh",
          "expectedOutput": "0",
          "weightage": 2
        },
        {
          "id": "4",
          "description": "Container is running properly",
          "verificationScriptFile": "q12_s4_validate_container.sh",
          "expectedOutput": "0",
          "weightage": 2
        }
@@ -489,7 +482,7 @@
    {
      "id": "13",
      "namespace": "jobs",
      "machineHostname": "ckad9999",
      "question": "Create a Job named `data-processor` in the `jobs` namespace with the following specifications:\n\n1. Use image `busybox`\n2. Command: `['sh', '-c', 'for i in $(seq 1 5); do echo Processing item $i; sleep 2; done']`\n3. Set restart policy to `Never`\n4. Set backoff limit to `4`\n5. Set active deadline seconds to `30`\n\nEnsure the namespace exists before creating the job.",
      "concepts": ["jobs", "restart-policy", "backoff-limit"],
      "verification": [
@@ -526,7 +519,7 @@
    {
      "id": "14",
      "namespace": "init-containers",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `app-with-init` in the `init-containers` namespace with the following specifications:\n\n1. Main container using image `nginx`\n2. Init container using image `busybox` with command: `['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done']`\n3. Create a service named `myservice` using image `nginx`\n4. Share a volume between init container and main container at `/shared`\n\nEnsure the namespace exists before creating the resources.",
      "concepts": ["init-containers", "services", "shared-volumes"],
      "verification": [
@@ -562,10 +555,10 @@
    },
    {
      "id": "15",
      "namespace": "helm-basics",
      "machineHostname": "ckad9999",
      "question": "Perform basic Helm operations:\n\n1. Create a namespace named `helm-basics`\n2. Add the Bitnami repository to Helm with the name `bitnami`\n3. Install the `nginx` chart from the Bitnami repository in the `helm-basics` namespace with the release name `nginx-release`\n4. Save the release notes of the installation to a file at `/tmp/release-notes.txt`",
      "concepts": ["helm", "package-management", "kubernetes-applications"],
      "verification": [
        {
          "id": "1",
@@ -576,24 +569,24 @@
        },
        {
          "id": "2",
          "description": "Helm chart is installed correctly",
          "verificationScriptFile": "q15_s2_validate_chart_install.sh",
          "expectedOutput": "0",
          "weightage": 3
        },
        {
          "id": "3",
          "description": "Release notes are saved to file",
          "verificationScriptFile": "q15_s3_validate_release_notes.sh",
          "expectedOutput": "0",
          "weightage": 2
        }
      ]
    },
    {
      "id": "16",
      "namespace": "health-checks",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `health-check-pod` in the `health-checks` namespace with the following specifications:\n\n1. Use image `nginx`\n\n2. Configure startup probe:\n - HTTP GET on port 80\n - Initial delay: 10s\n - Period: 3s\n - Failure threshold: 3\n\n3. Configure liveness probe:\n - HTTP GET on port 80\n - Initial delay: 15s\n - Period: 5s\n - Failure threshold: 3\n\n4. Configure readiness probe:\n - HTTP GET on port 80\n - Initial delay: 5s\n - Period: 3s\n - Failure threshold: 3\n\nEnsure the namespace exists before creating the pod.",
      "concepts": ["startup-probes", "liveness-probes", "readiness-probes"],
      "verification": [
@@ -637,7 +630,7 @@
    {
      "id": "17",
      "namespace": "pod-lifecycle",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `lifecycle-pod` in the `pod-lifecycle` namespace with the following specifications:\n\n1. Use image `nginx`\n2. Add postStart hook that creates a file `/usr/share/nginx/html/welcome.txt` with content `Welcome to the pod!`\n3. Add preStop hook that waits for `10` seconds\n4. Set termination grace period to `30` seconds\n\nEnsure the namespace exists before creating the pod.",
      "concepts": ["lifecycle-hooks", "post-start", "pre-stop", "termination"],
      "verification": [
@@ -680,10 +673,10 @@
    },
    {
      "id": "18",
      "namespace": "crd-demo",
      "machineHostname": "ckad9999",
      "question": "Create a Custom Resource Definition (CRD) for a simple application:\n\n1. Create a CRD named `applications.training.ckad.io` with:\n - Group: `training.ckad.io`\n - Version: `v1`\n - Kind: `Application`\n - Scope: `Namespaced`\n - Required fields: `spec.image` (string) and `spec.replicas` (integer)\n\n2. After creating the CRD, create a custom resource in the `crd-demo` namespace named `my-app` with:\n - Image: `nginx:1.19.0`\n - Replicas: `3`\n\nEnsure the namespace exists before creating the resources.",
      "concepts": ["custom-resource-definitions", "api-extensions", "custom-resources"],
      "verification": [
        {
          "id": "1",
@@ -694,29 +687,29 @@
        },
        {
          "id": "2",
          "description": "CRD is created with correct configuration",
          "verificationScriptFile": "q18_s2_validate_crd.sh",
          "expectedOutput": "0",
          "weightage": 3
        },
        {
          "id": "3",
          "description": "Custom resource is created with correct name",
          "verificationScriptFile": "q18_s3_validate_cr_name.sh",
          "expectedOutput": "0",
          "weightage": 2
        },
        {
          "id": "4",
          "description": "Custom resource has correct image field",
          "verificationScriptFile": "q18_s4_validate_cr_image.sh",
          "expectedOutput": "0",
          "weightage": 2
        },
        {
          "id": "5",
          "description": "Custom resource has correct replicas field",
          "verificationScriptFile": "q18_s5_validate_cr_replicas.sh",
          "expectedOutput": "0",
          "weightage": 2
        }
@@ -724,52 +717,31 @@
    },
    {
      "id": "19",
      "namespace": "custom-columns-demo",
      "machineHostname": "ckad9999",
      "question": "Use kubectl custom columns to extract and display pod information:\n\n1. There are several pods and deployments running in the `custom-columns-demo` namespace\n2. Create a custom column output showing all pods from all namespaces, including their names, namespaces, and container images\n3. Save this output to `/tmp/pod-images.txt`\n4. Create another output showing multi-container pod details with pod name, namespace, and all container images as comma-separated values\n5. Save this second output to `/tmp/all-container-images.txt`\n\nEnsure the namespace exists before starting your work.",
      "concepts": ["kubectl", "custom-columns", "jsonpath", "output-formatting"],
      "verification": [
        {
          "id": "2",
          "description": "pod-images.txt file exists and has correct format",
          "verificationScriptFile": "q19_s2_validate_basic_output.sh",
          "expectedOutput": "0",
          "weightage": 2
        },
        {
          "id": "3",
          "description": "all-container-images.txt file exists with multi-container details",
          "verificationScriptFile": "q19_s3_validate_advanced_output.sh",
          "expectedOutput": "0",
          "weightage": 3
        }
      ]
    },
    {
      "id": "20",
      "namespace": "pod-configuration",
      "machineHostname": "ckad9999",
      "question": "Create a Pod named `config-pod` in the `pod-configuration` namespace with the following specifications:\n\n1. Use image `nginx`\n2. Add environment variables:\n - `APP_ENV=production`\n - `DEBUG=false`\n3. Add environment variables from ConfigMap `app-config` with keys:\n - `DB_HOST`\n - `DB_PORT`\n4. Mount ConfigMap `app-config` as a volume at `/etc/app-config`\n5. Add environment variables from Secret `app-secret` with keys:\n - `API_KEY`\n - `API_SECRET`\n\nFirst create the ConfigMap and Secret with the specified data.\n\nEnsure the namespace exists before creating the resources.",
      "concepts": ["environment-variables", "configmaps", "secrets", "volumes"],
      "verification": [

View File

@@ -1,12 +1,20 @@
#!/bin/bash
# Make sure Docker is available
which docker > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
  echo "Docker is not available on this system"
  exit 1
fi

# Create directory for working files if it doesn't exist
mkdir -p /tmp

# Clean up any existing resources that might conflict
docker stop my-web > /dev/null 2>&1
docker rm my-web > /dev/null 2>&1
docker rmi my-nginx:v1 > /dev/null 2>&1
rm -f /tmp/Dockerfile /tmp/index.html > /dev/null 2>&1

echo "Setup complete for Question 12"
exit 0

View File

@@ -1,12 +1,14 @@
#!/bin/bash
# Check if Helm is installed
if ! command -v helm &> /dev/null; then
  echo "Helm is not available, skipping setup"
  exit 0
fi

# Clean up any existing resources
kubectl delete namespace helm-basics --ignore-not-found=true

echo "Setup complete for Question 15"
exit 0
@@ -1,15 +1,3 @@
 #!/bin/bash
-
-# Delete the pod-scheduling namespace if it exists
-echo "Setting up environment for Question 18 (Pod Scheduling)..."
-kubectl delete namespace pod-scheduling --ignore-not-found=true
-
-# Delete the high-priority PriorityClass if it exists
-kubectl delete priorityclass high-priority --ignore-not-found=true
-
-# Wait for deletion to complete
-sleep 2
-
-# Confirm environment is ready
-echo "Environment ready for Question 18"
+echo "Setup complete for Question 18"
 exit 0
@@ -1,12 +1,68 @@
 #!/bin/bash
-# Delete the pod-networking namespace if it exists
-echo "Setting up environment for Question 19 (Pod Networking)..."
-kubectl delete namespace pod-networking --ignore-not-found=true
+# Create namespace and sample resources for custom columns demo
+kubectl create namespace custom-columns-demo > /dev/null 2>&1
 
-# Wait for deletion to complete
-sleep 2
+# Create sample pods and deployments with different images
+kubectl apply -f - <<EOF > /dev/null 2>&1
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: custom-columns-demo
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-pod
+  namespace: custom-columns-demo
+spec:
+  containers:
+  - name: nginx
+    image: nginx:1.19
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-pod
+  namespace: custom-columns-demo
+spec:
+  containers:
+  - name: busybox
+    image: busybox:latest
+    command: ["sleep", "3600"]
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: multi-container-pod
+  namespace: custom-columns-demo
+spec:
+  containers:
+  - name: nginx
+    image: nginx:alpine
+  - name: sidecar
+    image: busybox:1.34
+    command: ["sleep", "3600"]
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: redis-deployment
+  namespace: custom-columns-demo
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: redis
+  template:
+    metadata:
+      labels:
+        app: redis
+    spec:
+      containers:
+      - name: redis
+        image: redis:alpine
+EOF
 
-# Confirm environment is ready
-echo "Environment ready for Question 19"
+echo "Setup complete for Question 19"
 exit 0
@@ -11,7 +11,7 @@ metadata:
   name: broken-deployment
   namespace: troubleshooting
 spec:
-  replicas: 3
+  replicas: 1
   selector:
     matchLabels:
       app: nginx
@@ -0,0 +1,26 @@
#!/bin/bash
# Check if the Dockerfile exists
if [[ ! -f "/tmp/Dockerfile" ]]; then
echo "❌ File '/tmp/Dockerfile' not found"
exit 1
fi
# Check if the Dockerfile contains necessary elements
if ! grep -q "FROM.*nginx:alpine" /tmp/Dockerfile; then
echo "❌ Dockerfile should use 'nginx:alpine' as base image"
exit 1
fi
if ! grep -q "COPY.*index.html" /tmp/Dockerfile; then
echo "❌ Dockerfile should copy 'index.html' file"
exit 1
fi
if ! grep -q "EXPOSE.*80" /tmp/Dockerfile; then
echo "❌ Dockerfile should expose port 80"
exit 1
fi
echo "✅ Dockerfile exists with correct content"
exit 0
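For reference, a /tmp/Dockerfile along these lines would pass the greps above; the COPY destination inside the image is an assumption, since the script only pattern-matches the FROM, COPY, and EXPOSE lines:

```bash
# Hypothetical Dockerfile satisfying the validation's three greps
cat > /tmp/Dockerfile <<'EOF'
FROM nginx:alpine
# Destination path is an assumption; nginx serves from this directory by default
COPY index.html /usr/share/nginx/html/index.html
EXPOSE 80
EOF
```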
@@ -1,13 +0,0 @@
#!/bin/bash
# Validate that the storage namespace exists
NS=$(kubectl get namespace storage -o jsonpath='{.metadata.name}' 2>/dev/null)
if [[ "$NS" == "storage" ]]; then
# Namespace exists
exit 0
else
# Namespace does not exist
echo "Namespace 'storage' does not exist"
exit 1
fi
@@ -0,0 +1,32 @@
#!/bin/bash
# Check if the index.html exists
if [[ ! -f "/tmp/index.html" ]]; then
echo "❌ File '/tmp/index.html' not found"
exit 1
fi
# Check if the HTML contains necessary content
if ! grep -q "Hello from CKAD Docker Question" /tmp/index.html; then
echo "❌ HTML file should contain 'Hello from CKAD Docker Question'"
exit 1
fi
# Check basic HTML structure
if ! grep -q "<!DOCTYPE html>" /tmp/index.html; then
echo "❌ HTML file should have DOCTYPE declaration"
exit 1
fi
if ! grep -q "<html>" /tmp/index.html; then
echo "❌ HTML file should contain <html> tag"
exit 1
fi
if ! grep -q "<body>" /tmp/index.html; then
echo "❌ HTML file should contain <body> tag"
exit 1
fi
echo "✅ index.html exists with correct content"
exit 0
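A minimal /tmp/index.html that satisfies these checks might look like the following; any extra markup is fine as long as the grepped strings are present:

```bash
# Hypothetical index.html with the DOCTYPE, tags, and message the script greps for
cat > /tmp/index.html <<'EOF'
<!DOCTYPE html>
<html>
<body>
<h1>Hello from CKAD Docker Question</h1>
</body>
</html>
EOF
```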
@@ -1,32 +0,0 @@
#!/bin/bash
# Validate that the web StatefulSet exists
STATEFULSET=$(kubectl get statefulset web -n storage -o jsonpath='{.metadata.name}' 2>/dev/null)
if [[ "$STATEFULSET" == "web" ]]; then
# StatefulSet exists, now check the specs
# Check image
IMAGE=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null)
# Check replicas
REPLICAS=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.replicas}' 2>/dev/null)
# Check service name
SERVICE_NAME=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.serviceName}' 2>/dev/null)
if [[ "$IMAGE" == "nginx:stable" && "$REPLICAS" == "3" && "$SERVICE_NAME" == "web" ]]; then
# All specifications are correct
exit 0
else
echo "StatefulSet 'web' does not have correct specifications."
echo "Found image: $IMAGE (expected: nginx:stable)"
echo "Found replicas: $REPLICAS (expected: 3)"
echo "Found service name: $SERVICE_NAME (expected: web)"
exit 1
fi
else
# StatefulSet does not exist
echo "StatefulSet 'web' does not exist in the 'storage' namespace"
exit 1
fi
@@ -0,0 +1,12 @@
#!/bin/bash
# Check if the Docker image exists
docker image inspect my-nginx:v1 &> /dev/null
if [[ $? -ne 0 ]]; then
echo "❌ Docker image 'my-nginx:v1' not found"
exit 1
fi
# Docker image is built correctly
echo "✅ Docker image 'my-nginx:v1' has been built correctly with all required elements"
exit 0
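Assuming the Dockerfile and index.html from the previous steps live in /tmp, the image this script looks for could be built with:

```bash
# Build with /tmp as the context so COPY index.html resolves
docker build -t my-nginx:v1 /tmp
```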
@@ -1,31 +0,0 @@
#!/bin/bash
# Validate that the web headless service exists
SERVICE=$(kubectl get service web -n storage -o jsonpath='{.metadata.name}' 2>/dev/null)
if [[ "$SERVICE" == "web" ]]; then
# Service exists, now check if it's a headless service
CLUSTER_IP=$(kubectl get service web -n storage -o jsonpath='{.spec.clusterIP}' 2>/dev/null)
if [[ "$CLUSTER_IP" == "None" ]]; then
# This is a headless service
# Check selector to make sure it matches the StatefulSet
SELECTOR=$(kubectl get service web -n storage -o jsonpath='{.spec.selector.app}' 2>/dev/null)
if [[ "$SELECTOR" != "" ]]; then
# Selector exists
exit 0
else
echo "Headless service 'web' does not have a selector"
exit 1
fi
else
echo "Service 'web' is not a headless service. ClusterIP: $CLUSTER_IP"
exit 1
fi
else
# Service does not exist
echo "Service 'web' does not exist in the 'storage' namespace"
exit 1
fi
@@ -0,0 +1,32 @@
#!/bin/bash
# Check if the container exists
docker container inspect my-web > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
echo "❌ Container 'my-web' not found"
exit 1
fi
# Check if the container is running
CONTAINER_STATE=$(docker container inspect my-web --format '{{.State.Status}}')
if [[ "$CONTAINER_STATE" != "running" ]]; then
echo "❌ Container 'my-web' is not running (current state: $CONTAINER_STATE)"
exit 1
fi
# Check if the container is using the correct image
CONTAINER_IMAGE=$(docker container inspect my-web --format '{{.Config.Image}}')
if [[ "$CONTAINER_IMAGE" != "my-nginx:v1" ]]; then
echo "❌ Container should use 'my-nginx:v1' image (current image: $CONTAINER_IMAGE)"
exit 1
fi
# Check if port 80 is published to 8080
PORT_MAPPING=$(docker container inspect my-web --format '{{json .HostConfig.PortBindings}}' | grep -o "8080")
if [[ -z "$PORT_MAPPING" ]]; then
echo "❌ Container should publish port 80 to port 8080 on the host"
exit 1
fi
echo "✅ Container 'my-web' is running properly"
exit 0
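A run command consistent with all four checks (container name, running state, image, and the 8080-to-80 port mapping) would be:

```bash
# Detached container named my-web, publishing host port 8080 to container port 80
docker run -d --name my-web -p 8080:80 my-nginx:v1
```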
@@ -1,32 +0,0 @@
#!/bin/bash
# Validate that the StatefulSet has a volume claim template
VCT=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.volumeClaimTemplates[0].metadata.name}' 2>/dev/null)
if [[ "$VCT" != "" ]]; then
# Volume claim template exists, check storage class and size
# Check storage class
STORAGE_CLASS=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.volumeClaimTemplates[0].spec.storageClassName}' 2>/dev/null)
# Check storage size
STORAGE_SIZE=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.volumeClaimTemplates[0].spec.resources.requests.storage}' 2>/dev/null)
# Check mount path
MOUNT_PATH=$(kubectl get statefulset web -n storage -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[0].mountPath}' 2>/dev/null)
if [[ "$STORAGE_CLASS" == "standard" && "$STORAGE_SIZE" == "1Gi" && "$MOUNT_PATH" == "/usr/share/nginx/html" ]]; then
# Volume claim template is configured correctly
exit 0
else
echo "Volume claim template is not configured correctly."
echo "Found storage class: $STORAGE_CLASS (expected: standard)"
echo "Found storage size: $STORAGE_SIZE (expected: 1Gi)"
echo "Found mount path: $MOUNT_PATH (expected: /usr/share/nginx/html)"
exit 1
fi
else
# No volume claim template
echo "StatefulSet 'web' does not have a volume claim template"
exit 1
fi
@@ -4,25 +4,26 @@
 JOB=$(kubectl get job data-processor -n jobs -o jsonpath='{.metadata.name}' 2>/dev/null)
 
 if [[ "$JOB" == "data-processor" ]]; then
-  # Job exists, now check the specs
+  echo "✅ Job 'data-processor' exists in namespace 'jobs'."
 
   # Check image
   IMAGE=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null)
 
-  # Check command
-  COMMAND=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.template.spec.containers[0].command}' 2>/dev/null)
+  # Get full command string
+  COMMAND=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.template.spec.containers[0].command[*]}' 2>/dev/null)
 
-  if [[ "$IMAGE" == "busybox" && "$COMMAND" == *"for i in"* && "$COMMAND" == *"seq 1 5"* ]]; then
-    # Job has correct image and command
+  # Use relaxed matching instead of strict string comparison
+  if [[ "$IMAGE" == "busybox" && "$COMMAND" == *"Processing item"* && "$COMMAND" == *"sleep 2"* ]]; then
+    echo "✅ Job has the correct image and command logic."
     exit 0
   else
     echo "Job 'data-processor' does not have correct specifications."
     echo "Found image: $IMAGE (expected: busybox)"
-    echo "Found command: $COMMAND (expected: command that loops from 1 to 5)"
+    echo "Found command: $COMMAND"
+    echo "  ➤ Expected command to include: 'Processing item' and 'sleep 2'"
     exit 1
   fi
 else
-  # Job does not exist
-  echo "Job 'data-processor' does not exist in the 'jobs' namespace"
+  echo "❌ Job 'data-processor' does not exist in the 'jobs' namespace."
   exit 1
 fi
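One Job that would satisfy this relaxed matching, assuming the jobs namespace already exists, is sketched below; the exact loop wording beyond the two grepped substrings is an assumption:

```bash
# Shell loop echoing "Processing item" and sleeping 2s, matching both substrings
kubectl create job data-processor -n jobs --image=busybox -- \
  /bin/sh -c 'for i in 1 2 3 4 5; do echo "Processing item $i"; sleep 2; done'
```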
@@ -1,13 +1,11 @@
 #!/bin/bash
-# Validate that the resource-management namespace exists
-NS=$(kubectl get namespace resource-management -o jsonpath='{.metadata.name}' 2>/dev/null)
-
-if [[ "$NS" == "resource-management" ]]; then
-  # Namespace exists
+# Check if the namespace exists
+kubectl get namespace helm-basics &> /dev/null
+if [[ $? -eq 0 ]]; then
+  echo "✅ Namespace 'helm-basics' exists"
   exit 0
 else
-  # Namespace does not exist
-  echo "Namespace 'resource-management' does not exist"
+  echo "❌ Namespace 'helm-basics' not found"
   exit 1
 fi
@@ -0,0 +1,31 @@
#!/bin/bash
# Check if Helm is installed
if ! command -v helm &> /dev/null; then
echo "❌ Helm is not available on this system"
exit 1
fi
# Check if Bitnami repository is added
REPO_EXISTS=$(helm repo list | grep bitnami | wc -l)
if [[ "$REPO_EXISTS" -eq 0 ]]; then
echo "❌ Bitnami repository is not added to Helm"
exit 1
fi
# Check if the nginx chart is installed
RELEASE_EXISTS=$(helm list -n helm-basics | grep nginx-release | wc -l)
if [[ "$RELEASE_EXISTS" -eq 0 ]]; then
echo "❌ nginx chart is not installed in the 'helm-basics' namespace"
exit 1
fi
# Check if pods related to the release are running
PODS_RUNNING=$(kubectl get pods -n helm-basics -l app.kubernetes.io/instance=nginx-release | grep Running | wc -l)
if [[ "$PODS_RUNNING" -eq 0 ]]; then
echo "❌ No pods from the nginx release are running"
exit 1
fi
echo "✅ Helm chart 'nginx' is installed correctly in the 'helm-basics' namespace"
exit 0
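A command sequence that would satisfy these checks might be the following; bitnami/nginx as the chart is an assumption consistent with the repository, release-name, and instance-label checks:

```bash
# Add the Bitnami repo, then install the release the script looks for
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install nginx-release bitnami/nginx --namespace helm-basics --create-namespace
```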
@@ -0,0 +1,70 @@
#!/bin/bash
# Check if the fixed values.yaml file exists
if [[ ! -f "/tmp/fixed-chart/values.yaml" ]]; then
echo "❌ File '/tmp/fixed-chart/values.yaml' not found"
exit 1
fi
# Check if the values.yaml file has correct content
VALUES_CONTENT=$(cat /tmp/fixed-chart/values.yaml)
# Check replicaCount
if ! grep -q "replicaCount: 2" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'replicaCount: 2'"
exit 1
fi
# Check image section
if ! grep -q "image:" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'image:' section"
exit 1
fi
if ! grep -q "repository: nginx" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'repository: nginx' under image section"
exit 1
fi
if ! grep -q "tag: 1.19.0" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'tag: 1.19.0' under image section"
exit 1
fi
# Check service section
if ! grep -q "service:" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'service:' section"
exit 1
fi
if ! grep -q "type: ClusterIP" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'type: ClusterIP' under service section"
exit 1
fi
# Check resources section
if ! grep -q "resources:" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'resources:' section"
exit 1
fi
if ! grep -q "limits:" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'limits:' under resources section"
exit 1
fi
if ! grep -q "cpu: 100m" /tmp/fixed-chart/values.yaml; then
echo "❌ values.yaml should have 'cpu: 100m' under limits section"
exit 1
fi
# Check for correct indentation
# This is a simple check - in a real exam we would need more comprehensive validation
INDENT_CHECK=$(grep -E '^ {2}[a-z]+:' /tmp/fixed-chart/values.yaml | wc -l)
if [[ "$INDENT_CHECK" -lt 3 ]]; then
echo "❌ values.yaml does not have proper indentation"
exit 1
fi
echo "✅ values.yaml is properly formatted with correct content"
exit 0
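A /tmp/fixed-chart/values.yaml shaped to pass every check above could look like this; the service port and memory limit are assumptions, since the script does not grep for them:

```bash
# Two-space indentation matters: the script counts '^ {2}[a-z]+:' lines
mkdir -p /tmp/fixed-chart
cat > /tmp/fixed-chart/values.yaml <<'EOF'
replicaCount: 2

image:
  repository: nginx
  tag: 1.19.0

service:
  type: ClusterIP
  port: 80

resources:
  limits:
    cpu: 100m
    memory: 128Mi
EOF
```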
@@ -0,0 +1,61 @@
#!/bin/bash
# Check if the fixed chart.yaml file exists
if [[ ! -f "/tmp/fixed-chart/chart.yaml" ]]; then
echo "❌ File '/tmp/fixed-chart/chart.yaml' not found"
exit 1
fi
# Check if the chart.yaml file has correct content
CHART_CONTENT=$(cat /tmp/fixed-chart/chart.yaml)
# Check basic chart properties
if ! grep -q "apiVersion: v2" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'apiVersion: v2'"
exit 1
fi
if ! grep -q "name: example-app" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'name: example-app'"
exit 1
fi
if ! grep -q "description: A simple example Helm chart" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have a description"
exit 1
fi
if ! grep -q "type: application" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'type: application'"
exit 1
fi
if ! grep -q "version: 0.1.0" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'version: 0.1.0'"
exit 1
fi
# Check for dependencies section
if ! grep -q "dependencies:" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'dependencies:' section"
exit 1
fi
# Check for nginx-ingress dependency
if ! grep -q "name: nginx-ingress" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'name: nginx-ingress' under dependencies"
exit 1
fi
if ! grep -q "version: \"1.41.0\"" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should have 'version: \"1.41.0\"' for nginx-ingress dependency"
exit 1
fi
if ! grep -q "repository: \"https://charts.bitnami.com/bitnami\"" /tmp/fixed-chart/chart.yaml; then
echo "❌ chart.yaml should specify Bitnami repository for nginx-ingress"
exit 1
fi
echo "✅ chart.yaml is properly formatted with correct dependencies"
exit 0
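Correspondingly, a chart.yaml containing all the grepped fields might be:

```bash
# Chart metadata plus the nginx-ingress dependency the script checks for
mkdir -p /tmp/fixed-chart
cat > /tmp/fixed-chart/chart.yaml <<'EOF'
apiVersion: v2
name: example-app
description: A simple example Helm chart
type: application
version: 0.1.0
dependencies:
  - name: nginx-ingress
    version: "1.41.0"
    repository: "https://charts.bitnami.com/bitnami"
EOF
```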
@@ -0,0 +1,22 @@
#!/bin/bash
# Check if the release notes file exists
if [[ ! -f "/tmp/release-notes.txt" ]]; then
echo "❌ File '/tmp/release-notes.txt' not found"
exit 1
fi
# Check if the file has content
if [[ ! -s "/tmp/release-notes.txt" ]]; then
echo "❌ Release notes file is empty"
exit 1
fi
# Check if the file actually contains release notes
if ! grep -q "RELEASE NOTES" /tmp/release-notes.txt && ! grep -q "Bitnami" /tmp/release-notes.txt && ! grep -q "nginx" /tmp/release-notes.txt; then
echo "❌ File does not appear to contain Helm release notes"
exit 1
fi
echo "✅ Release notes are saved correctly to /tmp/release-notes.txt"
exit 0
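Assuming the nginx-release from the Helm question above is installed, its notes can be captured with:

```bash
# Write the release's rendered NOTES.txt to the file the script expects
helm get notes nginx-release -n helm-basics > /tmp/release-notes.txt
```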
@@ -7,8 +7,8 @@ if [[ "$POD" == "lifecycle-pod" ]]; then
   # Pod exists, now check if it has a termination grace period
   GRACE_PERIOD=$(kubectl get pod lifecycle-pod -n pod-lifecycle -o jsonpath='{.spec.terminationGracePeriodSeconds}' 2>/dev/null)
 
-  if [[ "$GRACE_PERIOD" == "45" ]]; then
-    # Grace period is correctly set to 45 seconds
+  if [[ "$GRACE_PERIOD" == "30" ]]; then
+    # Grace period is correctly set to 30 seconds
     exit 0
   elif [[ "$GRACE_PERIOD" == "" ]]; then
     echo "Pod 'lifecycle-pod' does not have a termination grace period specified (using default of 30 seconds)"
@@ -1,13 +1,11 @@
 #!/bin/bash
-# Validate that the pod-scheduling namespace exists
-NS=$(kubectl get namespace pod-scheduling -o jsonpath='{.metadata.name}' 2>/dev/null)
-
-if [[ "$NS" == "pod-scheduling" ]]; then
-  # Namespace exists
+# Check if the namespace exists
+kubectl get namespace crd-demo &> /dev/null
+if [[ $? -eq 0 ]]; then
+  echo "✅ Namespace 'crd-demo' exists"
   exit 0
 else
-  # Namespace does not exist
-  echo "Namespace 'pod-scheduling' does not exist"
+  echo "❌ Namespace 'crd-demo' not found"
   exit 1
 fi
@@ -0,0 +1,40 @@
#!/bin/bash
# Check if the CRD exists
kubectl get crd applications.training.ckad.io &> /dev/null
if [[ $? -ne 0 ]]; then
echo "❌ CRD 'applications.training.ckad.io' not found"
exit 1
fi
# Check group
GROUP=$(kubectl get crd applications.training.ckad.io -o jsonpath='{.spec.group}')
if [[ "$GROUP" != "training.ckad.io" ]]; then
echo "❌ CRD has incorrect group. Expected 'training.ckad.io', got '$GROUP'"
exit 1
fi
# Check kind
KIND=$(kubectl get crd applications.training.ckad.io -o jsonpath='{.spec.names.kind}')
if [[ "$KIND" != "Application" ]]; then
echo "❌ CRD has incorrect kind. Expected 'Application', got '$KIND'"
exit 1
fi
# Check scope
SCOPE=$(kubectl get crd applications.training.ckad.io -o jsonpath='{.spec.scope}')
if [[ "$SCOPE" != "Namespaced" ]]; then
echo "❌ CRD has incorrect scope. Expected 'Namespaced', got '$SCOPE'"
exit 1
fi
# Check for required fields in schema
# This is a simple check - in a real scenario we would do more validation
SCHEMA=$(kubectl get crd applications.training.ckad.io -o jsonpath='{.spec.versions[*].schema.openAPIV3Schema.properties.spec.properties}')
if [[ "$SCHEMA" != *"image"* || "$SCHEMA" != *"replicas"* ]]; then
echo "❌ CRD schema should define 'image' and 'replicas' fields"
exit 1
fi
echo "✅ CRD 'applications.training.ckad.io' is configured correctly"
exit 0
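A CRD sketch consistent with the group, kind, scope, and schema checks follows; the version name v1 and the plural/singular names are assumptions, as the script does not inspect them:

```bash
# CRD with the grepped group/kind/scope and image/replicas in the spec schema
kubectl apply -f - <<'EOF'
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: applications.training.ckad.io
spec:
  group: training.ckad.io
  names:
    kind: Application
    plural: applications
    singular: application
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              image:
                type: string
              replicas:
                type: integer
EOF
```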
@@ -0,0 +1,18 @@
#!/bin/bash
# Check if the CRD exists first (prerequisite)
kubectl get crd applications.training.ckad.io &> /dev/null
if [[ $? -ne 0 ]]; then
echo "❌ CRD 'applications.training.ckad.io' not found. Cannot validate custom resources."
exit 1
fi
# Check if the custom resource exists
kubectl get application my-app -n crd-demo &> /dev/null
if [[ $? -ne 0 ]]; then
echo "❌ Custom resource 'my-app' not found in namespace 'crd-demo'"
exit 1
fi
echo "✅ Custom resource 'my-app' exists with correct name"
exit 0
@@ -0,0 +1,18 @@
#!/bin/bash
# Check if the custom resource exists first
kubectl get application my-app -n crd-demo &> /dev/null
if [[ $? -ne 0 ]]; then
echo "❌ Custom resource 'my-app' not found in namespace 'crd-demo'"
exit 1
fi
# Check if the image field is set correctly
IMAGE=$(kubectl get application my-app -n crd-demo -o jsonpath='{.spec.image}' 2>/dev/null)
if [[ "$IMAGE" != "nginx:1.19.0" ]]; then
echo "❌ Custom resource should have spec.image='nginx:1.19.0'. Current value: '$IMAGE'"
exit 1
fi
echo "✅ Custom resource has correct image field"
exit 0
@@ -0,0 +1,18 @@
#!/bin/bash
# Check if the custom resource exists first
kubectl get application my-app -n crd-demo &> /dev/null
if [[ $? -ne 0 ]]; then
echo "❌ Custom resource 'my-app' not found in namespace 'crd-demo'"
exit 1
fi
# Check if the replicas field is set correctly
REPLICAS=$(kubectl get application my-app -n crd-demo -o jsonpath='{.spec.replicas}' 2>/dev/null)
if [[ "$REPLICAS" != "3" ]]; then
echo "❌ Custom resource should have spec.replicas=3. Current value: '$REPLICAS'"
exit 1
fi
echo "✅ Custom resource has correct replicas field"
exit 0
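Putting the three custom-resource checks together, a matching Application object might be the following (the apiVersion assumes the v1 version from the CRD sketch above):

```bash
# my-app in crd-demo with the image and replica count the scripts verify
kubectl apply -f - <<'EOF'
apiVersion: training.ckad.io/v1
kind: Application
metadata:
  name: my-app
  namespace: crd-demo
spec:
  image: nginx:1.19.0
  replicas: 3
EOF
```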
@@ -1,13 +1,11 @@
 #!/bin/bash
-# Validate that the pod-networking namespace exists
-NS=$(kubectl get namespace pod-networking -o jsonpath='{.metadata.name}' 2>/dev/null)
-
-if [[ "$NS" == "pod-networking" ]]; then
-  # Namespace exists
+# Check if the namespace exists
+kubectl get namespace custom-columns-demo &> /dev/null
+if [[ $? -eq 0 ]]; then
+  echo "✅ Namespace 'custom-columns-demo' exists"
   exit 0
 else
-  # Namespace does not exist
-  echo "Namespace 'pod-networking' does not exist"
+  echo "❌ Namespace 'custom-columns-demo' not found"
   exit 1
 fi
@@ -0,0 +1,41 @@
#!/bin/bash
# Check if the output file exists
if [[ ! -f "/tmp/pod-images.txt" ]]; then
echo "❌ File '/tmp/pod-images.txt' not found"
exit 1
fi
# Check if the file has content
if [[ ! -s "/tmp/pod-images.txt" ]]; then
echo "❌ File '/tmp/pod-images.txt' is empty"
exit 1
fi
# Check if the file contains custom column headers
if ! grep -q "POD.*NAMESPACE.*IMAGE" /tmp/pod-images.txt; then
echo "❌ File should contain column headers for POD, NAMESPACE, and IMAGE"
exit 1
fi
# Check if our sample pods are listed
if ! grep -q "nginx-pod.*custom-columns-demo.*nginx:1.19" /tmp/pod-images.txt; then
echo "❌ File should contain nginx-pod from custom-columns-demo namespace with image nginx:1.19"
exit 1
fi
if ! grep -q "busybox-pod.*custom-columns-demo.*busybox" /tmp/pod-images.txt; then
echo "❌ File should contain busybox-pod from custom-columns-demo namespace"
exit 1
fi
# Check if the file contains pods from other namespaces
# This is a simplistic check - in a real scenario we'd verify actual pods from other namespaces
NAMESPACE_COUNT=$(grep -v "custom-columns-demo" /tmp/pod-images.txt | grep -v "NAMESPACE" | wc -l)
if [[ "$NAMESPACE_COUNT" -eq 0 ]]; then
echo "❌ File should contain pods from namespaces other than custom-columns-demo"
exit 1
fi
echo "✅ pod-images.txt file exists with correct custom column format"
exit 0
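A kubectl invocation consistent with these checks could be the one below; the column titles just need to match the POD/NAMESPACE/IMAGE header grep, and -A pulls in pods from every namespace:

```bash
# First container image per pod, across all namespaces
kubectl get pods -A -o custom-columns='POD:.metadata.name,NAMESPACE:.metadata.namespace,IMAGE:.spec.containers[0].image' > /tmp/pod-images.txt
```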
@@ -0,0 +1,46 @@
#!/bin/bash
# Check if the output file exists
if [[ ! -f "/tmp/all-container-images.txt" ]]; then
echo "❌ File '/tmp/all-container-images.txt' not found"
exit 1
fi
# Check if the file has content
if [[ ! -s "/tmp/all-container-images.txt" ]]; then
echo "❌ File '/tmp/all-container-images.txt' is empty"
exit 1
fi
# Check if the file contains multi-container pod information
# Our setup included a pod named multi-container-pod with nginx and busybox containers
MULTI_CONTAINER_LINE=$(grep "multi-container-pod" /tmp/all-container-images.txt)
if [[ -z "$MULTI_CONTAINER_LINE" ]]; then
echo "❌ File should contain information about multi-container-pod"
exit 1
fi
# Check if both container images are listed for the multi-container pod
# This could be done in several ways depending on the output format chosen by the user
if echo "$MULTI_CONTAINER_LINE" | grep -q "nginx:alpine" && echo "$MULTI_CONTAINER_LINE" | grep -q "busybox:1.34"; then
# Both images are found on the same line
echo "✅ File correctly shows both container images for multi-container-pod"
elif [[ $(grep "multi-container-pod.*nginx:alpine" /tmp/all-container-images.txt | wc -l) -gt 0 && $(grep "multi-container-pod.*busybox:1.34" /tmp/all-container-images.txt | wc -l) -gt 0 ]]; then
# Images might be on separate lines
echo "✅ File contains both container images for multi-container-pod"
else
echo "❌ File should list both nginx:alpine and busybox:1.34 images for multi-container-pod"
exit 1
fi
# Check if the file format includes the pod name and namespace
# This is a simplistic check - there are multiple valid formats
if grep -q "POD.*NAMESPACE.*IMAGES" /tmp/all-container-images.txt || grep -E -q ".*,.*,.*" /tmp/all-container-images.txt; then
echo "✅ File format includes pod name, namespace, and images"
else
echo "❌ File should include pod name, namespace, and images information"
exit 1
fi
echo "✅ all-container-images.txt contains multi-container pod details"
exit 0
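For the multi-container file, using [*] in the JSONPath prints every container image on one line, which matches the same-line branch of the check above:

```bash
# All container images per pod, comma-separated on a single line
kubectl get pods -A -o custom-columns='POD:.metadata.name,NAMESPACE:.metadata.namespace,IMAGES:.spec.containers[*].image' > /tmp/all-container-images.txt
```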
@@ -1,13 +1,11 @@
 #!/bin/bash
-# Validate that the network-policy namespace exists
-NS=$(kubectl get namespace network-policy -o jsonpath='{.metadata.name}' 2>/dev/null)
-
-if [[ "$NS" == "network-policy" ]]; then
-  # Namespace exists
+# Check if namespace exists
+NS_EXISTS=$(kubectl get namespace pod-configuration --no-headers --output=name 2>/dev/null | wc -l)
+if [[ "$NS_EXISTS" -eq 1 ]]; then
+  echo "✅ Namespace 'pod-configuration' exists"
   exit 0
 else
-  # Namespace does not exist
-  echo "Namespace 'network-policy' does not exist"
+  echo "❌ Namespace 'pod-configuration' not found"
   exit 1
 fi
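For question 20 itself (quoted in the assessment JSON near the top of this diff), a solution sketch consistent with the stated spec might be as follows; the literal values behind the ConfigMap and Secret keys are placeholders, since the excerpt does not pin them down:

```bash
# Namespace, then the ConfigMap/Secret the Pod references (values are placeholders)
kubectl create namespace pod-configuration
kubectl create configmap app-config -n pod-configuration \
  --from-literal=DB_HOST=db.example.com --from-literal=DB_PORT=5432
kubectl create secret generic app-secret -n pod-configuration \
  --from-literal=API_KEY=dummy-key --from-literal=API_SECRET=dummy-secret

# Pod with plain env vars, keyed refs, and the ConfigMap mounted as a volume
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: config-pod
  namespace: pod-configuration
spec:
  containers:
  - name: nginx
    image: nginx
    env:
    - name: APP_ENV
      value: "production"
    - name: DEBUG
      value: "false"
    - name: DB_HOST
      valueFrom:
        configMapKeyRef:
          name: app-config
          key: DB_HOST
    - name: DB_PORT
      valueFrom:
        configMapKeyRef:
          name: app-config
          key: DB_PORT
    - name: API_KEY
      valueFrom:
        secretKeyRef:
          name: app-secret
          key: API_KEY
    - name: API_SECRET
      valueFrom:
        secretKeyRef:
          name: app-secret
          key: API_SECRET
    volumeMounts:
    - name: app-config-volume
      mountPath: /etc/app-config
  volumes:
  - name: app-config-volume
    configMap:
      name: app-config
EOF
```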
Some files were not shown because too many files have changed in this diff.