KubeOS/0004-test-rust-proxy-add-drain-integration-test.patch

From 916ca24576d33dc024944a7ed18aaa39e95753f9 Mon Sep 17 00:00:00 2001
From: Yuhang Wei <weiyuhang3@huawei.com>
Date: Thu, 18 Jan 2024 11:13:20 +0800
Subject: [PATCH 04/13] test(rust proxy): add drain integration test

Move drain into a lib so it can be exercised by an integration test.
Use kind to deploy a cluster for the integration test.
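
The drain test is marked #[ignore], so it only runs on demand; from the
KubeOS-Rust/proxy directory it can be invoked with, for example:

    cargo test --test drain_test -- --ignored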
Signed-off-by: Yuhang Wei <weiyuhang3@huawei.com>
---
KubeOS-Rust/proxy/Cargo.toml | 7 ++
.../proxy/src/controller/controller.rs | 4 +-
KubeOS-Rust/proxy/src/controller/mod.rs | 1 -
KubeOS-Rust/proxy/src/controller/values.rs | 12 ---
.../proxy/src/{controller => }/drain.rs | 12 ++-
KubeOS-Rust/proxy/tests/common/mod.rs | 63 +++++++++++
KubeOS-Rust/proxy/tests/drain_test.rs | 41 +++++++
.../proxy/tests/setup/kind-config.yaml | 5 +
KubeOS-Rust/proxy/tests/setup/resources.yaml | 102 ++++++++++++++++++
.../proxy/tests/setup/setup_test_env.sh | 81 ++++++++++++++
10 files changed, 309 insertions(+), 19 deletions(-)
rename KubeOS-Rust/proxy/src/{controller => }/drain.rs (97%)
create mode 100644 KubeOS-Rust/proxy/tests/common/mod.rs
create mode 100644 KubeOS-Rust/proxy/tests/drain_test.rs
create mode 100644 KubeOS-Rust/proxy/tests/setup/kind-config.yaml
create mode 100644 KubeOS-Rust/proxy/tests/setup/resources.yaml
create mode 100644 KubeOS-Rust/proxy/tests/setup/setup_test_env.sh
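
Note: with the [lib] target named "drain", the integration tests under
tests/ compile as separate test crates that can import drain::drain_os,
while the proxy binary still builds from src/main.rs.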
diff --git a/KubeOS-Rust/proxy/Cargo.toml b/KubeOS-Rust/proxy/Cargo.toml
index 9a148e8..72eb6b9 100644
--- a/KubeOS-Rust/proxy/Cargo.toml
+++ b/KubeOS-Rust/proxy/Cargo.toml
@@ -6,6 +6,13 @@ name = "proxy"
version = "0.1.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+[lib]
+name = "drain"
+path = "src/drain.rs"
+
+[[bin]]
+name = "proxy"
+path = "src/main.rs"
[dependencies]
anyhow = "1.0.44"
diff --git a/KubeOS-Rust/proxy/src/controller/controller.rs b/KubeOS-Rust/proxy/src/controller/controller.rs
index e7ee9f9..b2bb332 100644
--- a/KubeOS-Rust/proxy/src/controller/controller.rs
+++ b/KubeOS-Rust/proxy/src/controller/controller.rs
@@ -13,6 +13,7 @@
use std::{collections::HashMap, env};

use anyhow::Result;
+use drain::drain_os;
use k8s_openapi::api::core::v1::Node;
use kube::{
    api::{Api, PostParams},
@@ -29,7 +30,6 @@ use super::{
    agentclient::{AgentMethod, ConfigInfo, KeyInfo, Sysconfig, UpgradeInfo},
    apiclient::ApplyApi,
    crd::{Configs, Content, OSInstance, OS},
-    drain::drain_os,
    utils::{check_version, get_config_version, ConfigOperation, ConfigType},
    values::{
        LABEL_UPGRADING, NODE_STATUS_CONFIG, NODE_STATUS_IDLE, OPERATION_TYPE_ROLLBACK, OPERATION_TYPE_UPGRADE,
@@ -340,7 +340,7 @@ impl<T: ApplyApi, U: AgentMethod> ProxyController<T, U> {
    }

    async fn drain_node(&self, node_name: &str, force: bool) -> Result<(), Error> {
-        use crate::controller::drain::error::DrainError::*;
+        use drain::error::DrainError::*;
        match drain_os(&self.k8s_client.clone(), node_name, force).await {
            Err(DeletePodsError { errors, .. }) => Err(Error::DrainNodeError { value: errors.join("; ") }),
            _ => Ok(()),
diff --git a/KubeOS-Rust/proxy/src/controller/mod.rs b/KubeOS-Rust/proxy/src/controller/mod.rs
index 384d74b..73be45c 100644
--- a/KubeOS-Rust/proxy/src/controller/mod.rs
+++ b/KubeOS-Rust/proxy/src/controller/mod.rs
@@ -16,7 +16,6 @@ mod apiclient;
mod apiserver_mock;
mod controller;
mod crd;
-mod drain;
mod utils;
mod values;

diff --git a/KubeOS-Rust/proxy/src/controller/values.rs b/KubeOS-Rust/proxy/src/controller/values.rs
index fe43851..dec905a 100644
--- a/KubeOS-Rust/proxy/src/controller/values.rs
+++ b/KubeOS-Rust/proxy/src/controller/values.rs
@@ -31,15 +31,3 @@ pub const SOCK_PATH: &str = "/run/os-agent/os-agent.sock";

pub const REQUEUE_NORMAL: ReconcilerAction = ReconcilerAction { requeue_after: Some(Duration::from_secs(15)) };
pub const REQUEUE_ERROR: ReconcilerAction = ReconcilerAction { requeue_after: Some(Duration::from_secs(1)) };
-
-pub const MAX_EVICT_POD_NUM: usize = 5;
-
-pub const EVERY_EVICTION_RETRY: Duration = Duration::from_secs(5);
-
-pub const EVERY_DELETION_CHECK: Duration = Duration::from_secs(5);
-
-pub const TIMEOUT: Duration = Duration::from_secs(u64::MAX);
-
-pub const RETRY_BASE_DELAY: Duration = Duration::from_millis(100);
-pub const RETRY_MAX_DELAY: Duration = Duration::from_secs(20);
-pub const MAX_RETRIES_TIMES: usize = 10;
diff --git a/KubeOS-Rust/proxy/src/controller/drain.rs b/KubeOS-Rust/proxy/src/drain.rs
similarity index 97%
rename from KubeOS-Rust/proxy/src/controller/drain.rs
rename to KubeOS-Rust/proxy/src/drain.rs
index ddc38ae..09cf662 100644
--- a/KubeOS-Rust/proxy/src/controller/drain.rs
+++ b/KubeOS-Rust/proxy/src/drain.rs
@@ -29,10 +29,14 @@ use self::error::{
    DrainError::{DeletePodsError, GetPodListsError, WaitDeletionError},
    EvictionError::{EvictionErrorNoRetry, EvictionErrorRetry},
};
-use super::values::{
-    EVERY_DELETION_CHECK, EVERY_EVICTION_RETRY, MAX_EVICT_POD_NUM, MAX_RETRIES_TIMES, RETRY_BASE_DELAY,
-    RETRY_MAX_DELAY, TIMEOUT,
-};
+
+pub const MAX_EVICT_POD_NUM: usize = 5;
+pub const EVERY_EVICTION_RETRY: Duration = Duration::from_secs(5);
+pub const EVERY_DELETION_CHECK: Duration = Duration::from_secs(5);
+pub const TIMEOUT: Duration = Duration::from_secs(u64::MAX);
+pub const RETRY_BASE_DELAY: Duration = Duration::from_millis(100);
+pub const RETRY_MAX_DELAY: Duration = Duration::from_secs(20);
+pub const MAX_RETRIES_TIMES: usize = 10;

pub async fn drain_os(client: &Client, node_name: &str, force: bool) -> Result<(), error::DrainError> {
    let pods_list = get_pods_deleted(client, node_name, force).await?;
diff --git a/KubeOS-Rust/proxy/tests/common/mod.rs b/KubeOS-Rust/proxy/tests/common/mod.rs
new file mode 100644
index 0000000..8257759
--- /dev/null
+++ b/KubeOS-Rust/proxy/tests/common/mod.rs
@@ -0,0 +1,63 @@
+use std::process::{Command, Stdio};
+
+use anyhow::Result;
+use k8s_openapi::api::core::v1::Node;
+use kube::{
+    api::ResourceExt,
+    client::Client,
+    config::{Config, KubeConfigOptions, Kubeconfig},
+    Api,
+};
+use manager::utils::{CommandExecutor, RealCommandExecutor};
+
+pub const CLUSTER: &str = "kubeos-test";
+
+pub fn run_command(cmd: &str, args: &[&str]) -> Result<()> {
+    let output = Command::new(cmd).args(args).stdout(Stdio::inherit()).stderr(Stdio::inherit()).output()?;
+    if !output.status.success() {
+        anyhow::bail!("failed to run command: {} {}", cmd, args.join(" "));
+    }
+    Ok(())
+}
+
+pub async fn setup() -> Result<Client> {
+    // add the local bin directory to PATH so kind and kubectl are found
+    let path = std::env::var("PATH").unwrap();
+    let new_path = format!("{}:{}", path, "../../bin");
+    std::env::set_var("PATH", new_path);
+
+    // create cluster
+    let executor = RealCommandExecutor {};
+    println!("Creating cluster");
+    run_command("bash", &["./tests/setup/setup_test_env.sh"]).expect("failed to create cluster");
+
+    // connect to the cluster
+    let kind_config = executor.run_command_with_output("kind", &["get", "kubeconfig", "-n", CLUSTER]).unwrap();
+    let kubeconfig = Kubeconfig::from_yaml(kind_config.as_str()).expect("failed to parse kubeconfig");
+    let options = KubeConfigOptions::default();
+    let config = Config::from_custom_kubeconfig(kubeconfig, &options).await.expect("failed to create config");
+    let client = Client::try_from(config).expect("failed to create client");
+    // list all nodes
+    let nodes: Api<Node> = Api::all(client.clone());
+    let node_list = nodes.list(&Default::default()).await.expect("failed to list nodes");
+    for n in node_list {
+        println!("Found Node: {}", n.name());
+    }
+    // check node status
+    let node = nodes.get("kubeos-test-worker").await.unwrap();
+    let status = node.status.unwrap();
+    let conditions = status.conditions.unwrap();
+    for c in conditions {
+        if c.type_ == "Ready" {
+            assert_eq!(c.status, "True");
+        }
+    }
+    println!("Cluster ready");
+    Ok(client)
+}
+
+pub fn clean_env() {
+    let executor = RealCommandExecutor {};
+    println!("Cleaning cluster");
+    executor.run_command("kind", &["delete", "clusters", CLUSTER]).expect("failed to clean cluster");
+}
diff --git a/KubeOS-Rust/proxy/tests/drain_test.rs b/KubeOS-Rust/proxy/tests/drain_test.rs
new file mode 100644
index 0000000..2f4f150
--- /dev/null
+++ b/KubeOS-Rust/proxy/tests/drain_test.rs
@@ -0,0 +1,41 @@
+mod common;
+
+use common::*;
+use drain::drain_os;
+use k8s_openapi::api::core::v1::{Node, Pod};
+use kube::Api;
+
+#[tokio::test]
+#[ignore = "integration test"]
+async fn test_drain() {
+    let client = setup().await.unwrap();
+    // drain node
+    let nodes: Api<Node> = Api::all(client.clone());
+    let node_name = "kubeos-test-worker";
+    println!("cordon node");
+    nodes.cordon(node_name).await.unwrap();
+    println!("drain node");
+    drain_os(&client, node_name, true).await.unwrap();
+
+    // assert unschedulable
+    println!("check node unschedulable");
+    let node = nodes.get(node_name).await.unwrap();
+    if let Some(spec) = node.spec {
+        assert_eq!(spec.unschedulable, Some(true));
+    } else {
+        panic!("node spec is none");
+    }
+    // after drain, every pod left on the node should belong to a DaemonSet
+    println!("list all pods on kubeos-test-worker node");
+    let pods: Api<Pod> = Api::all(client.clone());
+    let pod_list = pods.list(&Default::default()).await.unwrap();
+    // check that each remaining pod is owned by a DaemonSet
+    for p in pod_list {
+        if p.spec.unwrap().node_name.unwrap() == node_name {
+            assert_eq!(p.metadata.owner_references.unwrap()[0].kind, "DaemonSet");
+        }
+    }
+    nodes.uncordon(node_name).await.unwrap();
+
+    clean_env()
+}
diff --git a/KubeOS-Rust/proxy/tests/setup/kind-config.yaml b/KubeOS-Rust/proxy/tests/setup/kind-config.yaml
new file mode 100644
index 0000000..0fe29e7
--- /dev/null
+++ b/KubeOS-Rust/proxy/tests/setup/kind-config.yaml
@@ -0,0 +1,5 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
\ No newline at end of file
diff --git a/KubeOS-Rust/proxy/tests/setup/resources.yaml b/KubeOS-Rust/proxy/tests/setup/resources.yaml
new file mode 100644
index 0000000..0e449d5
--- /dev/null
+++ b/KubeOS-Rust/proxy/tests/setup/resources.yaml
@@ -0,0 +1,102 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: example-daemonset
+spec:
+  selector:
+    matchLabels:
+      name: example-daemonset
+  template:
+    metadata:
+      labels:
+        name: example-daemonset
+    spec:
+      containers:
+      - name: busybox
+        image: busybox:stable
+        command: ["/bin/sh", "-c", "sleep 3600"]
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-with-local-storage
+spec:
+  containers:
+  - name: busybox
+    image: busybox:stable
+    command: ["/bin/sh", "-c", "sleep 3600"]
+    volumeMounts:
+    - mountPath: "/data"
+      name: local-volume
+  volumes:
+  - name: local-volume
+    emptyDir: {}
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: standalone-pod
+spec:
+  containers:
+  - name: busybox
+    image: busybox:stable
+    command: ["/bin/sh", "-c", "sleep 3600"]
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example-deployment
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: example
+  template:
+    metadata:
+      labels:
+        app: example
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:alpine
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 1
+            preference:
+              matchExpressions:
+              - key: "node-role.kubernetes.io/control-plane"
+                operator: DoesNotExist
+      tolerations:
+      - key: "node-role.kubernetes.io/master"
+        operator: "Exists"
+      - key: "node-role.kubernetes.io/control-plane"
+        operator: "Exists"
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: example-pdb
+spec:
+  minAvailable: 1
+  selector:
+    matchLabels:
+      app: example
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: resource-intensive-pod
+spec:
+  containers:
+  - name: busybox
+    image: busybox:stable
+    command: ["/bin/sh", "-c", "sleep 3600"]
+    resources:
+      requests:
+        memory: "256Mi"
+        cpu: "500m"
+      limits:
+        memory: "512Mi"
+        cpu: "1000m"
+
diff --git a/KubeOS-Rust/proxy/tests/setup/setup_test_env.sh b/KubeOS-Rust/proxy/tests/setup/setup_test_env.sh
new file mode 100644
index 0000000..d24d8e0
--- /dev/null
+++ b/KubeOS-Rust/proxy/tests/setup/setup_test_env.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+# This script is expected to run from the proxy directory.
+
+set -Eeuxo pipefail
+
+# Define variables
+KIND_VERSION="v0.19.0"
+KUBECTL_VERSION="v1.24.15"
+KIND_CLUSTER_NAME="kubeos-test"
+DOCKER_IMAGES=("busybox:stable" "nginx:alpine" "kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab")
+NODE_IMAGE="kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab"
+RESOURCE="./tests/setup/resources.yaml"
+KIND_CONFIG="./tests/setup/kind-config.yaml"
+BIN_PATH="../../bin/"
+ARCH=$(uname -m)
+
+# Install kind and kubectl
+install_bins() {
+    # create the bin directory if it does not exist
+    if [ ! -d "${BIN_PATH}" ]; then
+        mkdir -p "${BIN_PATH}"
+    fi
+    if [ ! -f "${BIN_PATH}"kind ]; then
+        echo "Installing kind..."
+        # For AMD64 / x86_64
+        if [ "$ARCH" = x86_64 ]; then
+            # add a proxy here if you are behind one
+            curl -Lo "${BIN_PATH}"kind https://kind.sigs.k8s.io/dl/"${KIND_VERSION}"/kind-linux-amd64
+        fi
+        # For ARM64
+        if [ "$ARCH" = aarch64 ]; then
+            curl -Lo "${BIN_PATH}"kind https://kind.sigs.k8s.io/dl/"${KIND_VERSION}"/kind-linux-arm64
+        fi
+        chmod +x "${BIN_PATH}"kind
+    fi
+    if [ ! -f "${BIN_PATH}"kubectl ]; then
+        echo "Installing kubectl..."
+        if [ "$ARCH" = x86_64 ]; then
+            curl -Lo "${BIN_PATH}"kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
+        fi
+        if [ "$ARCH" = aarch64 ]; then
+            curl -Lo "${BIN_PATH}"kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/arm64/kubectl"
+        fi
+        chmod +x "${BIN_PATH}"kubectl
+    fi
+    export PATH=$PATH:"${BIN_PATH}"
+}
+
+# Create Kind cluster
+create_cluster() {
+    echo "Creating Kind cluster..."
+    for image in "${DOCKER_IMAGES[@]}"; do
+        docker pull "$image"
+    done
+    kind create cluster --name "${KIND_CLUSTER_NAME}" --config "${KIND_CONFIG}" --image "${NODE_IMAGE}"
+}
+
+# Load Docker images into the Kind cluster
+load_docker_image() {
+    echo "Loading Docker images into Kind cluster..."
+    DOCKER_IMAGE=$(printf "%s " "${DOCKER_IMAGES[@]:0:2}")
+    kind load docker-image ${DOCKER_IMAGE} --name "${KIND_CLUSTER_NAME}"
+}
+
+# Apply Kubernetes resource files
+apply_k8s_resources() {
+    echo "Applying Kubernetes resources from ${RESOURCE}..."
+    kubectl apply -f "${RESOURCE}"
+    echo "Waiting for resources to become ready..."
+    sleep 40s
+}
+
+main() {
+    export no_proxy=localhost,127.0.0.1
+    install_bins
+    create_cluster
+    load_docker_image
+    apply_k8s_resources
+}
+
+main
--
2.34.1