author     Ernesto Puerta <37327689+epuertat@users.noreply.github.com>   2021-08-03 17:56:40 +0200
committer  GitHub <noreply@github.com>                                   2021-08-03 17:56:40 +0200
commit     b748752acd1e8736a6614ce6594725ee3eaf8a7e (patch)
tree       dba957806ac08cb2c261658218b3f03bebc3fe7c
parent     a75ca1736d967fa82e006ed719cd738de7f4fbc5 (diff)
parent     c5ef706b0c6a15e6334034428743d0f71348592e (diff)
download   ceph-b748752acd1e8736a6614ce6594725ee3eaf8a7e.tar.gz
Merge pull request #42586 from rhcs-dashboard/wip-51995-octopus
octopus: mgr/dashboard: cephadm-e2e job script: improvements

Reviewed-by: Waad Alkhoury <walkhour@redhat.com>
Reviewed-by: Aashish Sharma <aasharma@redhat.com>
Reviewed-by: Avan Thakkar <athakkar@redhat.com>
Reviewed-by: Ernesto Puerta <epuertat@redhat.com>
Reviewed-by: Nizamudeen A <nia@redhat.com>
Reviewed-by: Pere Diaz Bou <pdiazbou@redhat.com>
-rw-r--r--  src/pybind/mgr/dashboard/HACKING.rst                          | 18
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh      | 17
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml          | 11
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh  | 70
-rwxr-xr-x  src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh          | 79
5 files changed, 120 insertions(+), 75 deletions(-)
diff --git a/src/pybind/mgr/dashboard/HACKING.rst b/src/pybind/mgr/dashboard/HACKING.rst
index cd841d20797..b50182c85ef 100644
--- a/src/pybind/mgr/dashboard/HACKING.rst
+++ b/src/pybind/mgr/dashboard/HACKING.rst
@@ -190,7 +190,14 @@ run-cephadm-e2e-tests.sh
Orchestrator backend behave correctly.
Prerequisites: you need to install `KCLI
-<https://kcli.readthedocs.io/en/latest/>`_ in your local machine.
+<https://kcli.readthedocs.io/en/latest/>`_ and Node.js on your local machine.
+
+Configure KCLI plan requirements::
+
+ $ sudo chown -R $(id -un) /var/lib/libvirt/images
+ $ mkdir -p /var/lib/libvirt/images/ceph-dashboard
+ $ kcli create pool -p /var/lib/libvirt/images/ceph-dashboard ceph-dashboard
+ $ kcli create network -c 192.168.100.0/24 ceph-dashboard
Note:
This script is aimed to be run as a Jenkins job, so the cleanup is triggered only in a Jenkins
@@ -199,10 +206,17 @@ Note:
Start E2E tests by running::
$ cd <your/ceph/repo/dir>
- $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist src/pybind/mgr/dashboard/frontend/node_modules
+ $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/{dist,node_modules,src/environments}
$ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
$ kcli delete plan -y ceph # After tests finish.
+You can also start a cluster in development mode and later run E2E tests by running::
+
+ $ ./src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh --dev-mode
+ $ # Work on your feature, bug fix, ...
+ $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+ $ # Remember to kill the npm build watch process, e.g.: pkill -f "ng build"
+
Other running options
.....................
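
The chown line above relies on Bash brace expansion to fix ownership of three
paths in a single command; a minimal sketch of the equivalent expanded form,
assuming the same repository layout::

  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist
  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/node_modules
  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/src/environments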
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
index fd836f7378e..af7ef81f43f 100755
--- a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
+++ b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
@@ -2,21 +2,14 @@
export PATH=/root/bin:$PATH
mkdir /root/bin
-{% if ceph_dev_folder is defined %}
- cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
-{% else %}
- cd /root/bin
- curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/octopus/src/cephadm/cephadm
-{% endif %}
+
+cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
chmod +x /root/bin/cephadm
mkdir -p /etc/ceph
mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
-{% if ceph_dev_folder is defined %}
- cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
-{% else %}
- cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate
-{% endif %}
-fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
+
+cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
+
{% for number in range(1, nodes) %}
ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@{{ prefix }}-node-0{{ number }}.{{ domain }}
{% endfor %}
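
bootstrap-cluster.sh is a Jinja2 template that KCLI renders before copying it
into the first VM, so the {{ ... }} placeholders are substituted at plan
creation time. A sketch of how the rendered bootstrap section could look,
assuming the illustrative values admin_password=password and
ceph_dev_folder=ceph, with nodes=3, prefix=ceph and domain=cephlab.com taken
from ceph_cluster.yml below::

  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password password \
      --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/ceph

  ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@ceph-node-01.cephlab.com
  ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@ceph-node-02.cephlab.com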
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
index 80273bbfe5a..60440972360 100755
--- a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
+++ b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
@@ -1,7 +1,7 @@
parameters:
nodes: 3
- pool: default
- network: default
+ pool: ceph-dashboard
+ network: ceph-dashboard
domain: cephlab.com
prefix: ceph
numcpus: 1
@@ -26,15 +26,14 @@ parameters:
- {{ network }}
disks: {{ disks }}
pool: {{ pool }}
- {% if ceph_dev_folder is defined %}
sharedfolders: [{{ ceph_dev_folder }}]
- {% endif %}
+ files:
+ - bootstrap-cluster.sh
cmds:
- dnf -y install python3 chrony lvm2 podman
- sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
- setenforce 0
{% if number == 0 %}
- scripts:
- - bootstrap-cluster.sh
+ - bash /root/bootstrap-cluster.sh
{% endif %}
{% endfor %}
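
Every key under parameters: can be overridden at plan creation time with
KCLI's -P flag, which is how start-cluster.sh injects ceph_dev_folder and
dev_mode. A minimal sketch with illustrative overrides (numcpus is a real
parameter above; the value 2 is just an example)::

  $ kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
        -P ceph_dev_folder=${PWD} -P numcpus=2 ceph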
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
index 90bfa8d9ebb..178c89f5ba6 100755
--- a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+++ b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
@@ -2,67 +2,23 @@
set -ex
-cleanup() {
- if [[ -n "$JENKINS_HOME" ]]; then
- printf "\n\nStarting cleanup...\n\n"
- kcli delete plan -y ceph || true
- sudo podman container prune -f
- printf "\n\nCleanup completed.\n\n"
- fi
-}
-
-on_error() {
- if [ "$1" != "0" ]; then
- printf "\n\nERROR $1 thrown on line $2\n\n"
- printf "\n\nCollecting info...\n\n"
- for vm_id in 0 1 2
- do
- local vm="ceph-node-0${vm_id}"
- printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
- kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
- printf "\n\nEnd of journalctl from VM ${vm}\n\n"
- printf "\n\nDisplaying podman logs:\n\n"
- kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
- done
- printf "\n\nTEST FAILED.\n\n"
- fi
-}
-
-trap 'on_error $? $LINENO' ERR
-trap 'cleanup $? $LINENO' EXIT
-
-sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
-
-: ${CEPH_DEV_FOLDER:=${PWD}}
-
-# Required to start dashboard.
-cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
-NG_CLI_ANALYTICS=false npm ci
-npm run build
-
-cd ${CEPH_DEV_FOLDER}
-kcli delete plan -y ceph || true
-kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${CEPH_DEV_FOLDER} ceph
-
-while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
- sleep 30
- kcli list vm
- # Uncomment for debugging purposes.
- #kcli ssh -u root -- ceph-node-00 'podman ps -a'
- #kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
- kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
-done
-
-cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
-npx cypress info
-
: ${CYPRESS_BASE_URL:=''}
: ${CYPRESS_LOGIN_USER:='admin'}
: ${CYPRESS_LOGIN_PWD:='password'}
: ${CYPRESS_ARGS:=''}
+: ${DASHBOARD_PORT:='8443'}
+
+get_vm_ip () {
+ local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+ echo -n $ip
+}
if [[ -z "${CYPRESS_BASE_URL}" ]]; then
- CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v | sed -e 's/[^0-9.]//'):8443"
+ CEPH_NODE_00_IP="$(get_vm_ip ceph-node-00)"
+ if [[ -z "${CEPH_NODE_00_IP}" ]]; then
+ . "$(dirname $0)"/start-cluster.sh
+ fi
+ CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}"
fi
export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
@@ -78,4 +34,8 @@ cypress_run () {
npx cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
}
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+
cypress_run "orchestrator/workflow/*-spec.ts"
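
The `: ${VAR:=default}` assignments above make the Cypress settings
overridable from the environment, so the tests can target an already-running
cluster without bootstrapping a new one. A sketch, assuming an illustrative
dashboard address on the 192.168.100.0/24 network configured earlier::

  $ CYPRESS_BASE_URL="https://192.168.100.100:8443" \
    CYPRESS_LOGIN_USER=admin CYPRESS_LOGIN_PWD=password \
    ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh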
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
new file mode 100755
index 00000000000..61775d0bac4
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+set -ex
+
+cleanup() {
+ set +x
+ if [[ -n "$JENKINS_HOME" ]]; then
+ printf "\n\nStarting cleanup...\n\n"
+ kcli delete plan -y ceph || true
+ docker container prune -f
+ printf "\n\nCleanup completed.\n\n"
+ fi
+}
+
+on_error() {
+ set +x
+ if [ "$1" != "0" ]; then
+ printf "\n\nERROR $1 thrown on line $2\n\n"
+ printf "\n\nCollecting info...\n\n"
+ for vm_id in 0 1 2
+ do
+ local vm="ceph-node-0${vm_id}"
+ printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
+ kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
+ printf "\n\nEnd of journalctl from VM ${vm}\n\n"
+ printf "\n\nDisplaying container logs:\n\n"
+ kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
+ done
+ printf "\n\nTEST FAILED.\n\n"
+ fi
+}
+
+trap 'on_error $? $LINENO' ERR
+trap 'cleanup $? $LINENO' EXIT
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+EXTRA_PARAMS=''
+DEV_MODE=''
+# Check script args/options.
+for arg in "$@"; do
+ shift
+ case "$arg" in
+ "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS="-P dev_mode=${DEV_MODE}" ;;
+ esac
+done
+
+kcli delete plan -y ceph || true
+
+# Build dashboard frontend (required to start the module).
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+NG_CLI_ANALYTICS=false npm ci
+FRONTEND_BUILD_OPTS='-- --prod'
+if [[ -n "${DEV_MODE}" ]]; then
+ FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
+fi
+npm run build ${FRONTEND_BUILD_OPTS} &
+
+cd ${CEPH_DEV_FOLDER}
+: ${VM_IMAGE:='fedora34'}
+: ${VM_IMAGE_URL:='https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2'}
+kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
+kcli delete plan -y ceph || true
+kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
+ -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
+ ${EXTRA_PARAMS} ceph
+
+: ${CLUSTER_DEBUG:=0}
+: ${DASHBOARD_CHECK_INTERVAL:=10}
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
+ sleep ${DASHBOARD_CHECK_INTERVAL}
+ kcli list vm
+ if [[ ${CLUSTER_DEBUG} != 0 ]]; then
+ kcli ssh -u root -- ceph-node-00 'podman ps -a'
+ kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
+ fi
+ kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
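
The readiness loop above keys off the "Dashboard is now available" message
that cephadm emits through cloud-init. A sketch of performing the same check
by hand and deriving the dashboard URL, reusing the commands from the scripts
(8443 is the default DASHBOARD_PORT)::

  $ kcli ssh -u root -- ceph-node-00 \
        'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available"
  $ echo "https://$(kcli info vm ceph-node-00 -f ip -v | grep -Eo '([0-9]{1,3}\.){3}[0-9]{1,3}'):8443"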