From 4e10db2ebab53f560c67514522703df3e37026a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franti=C5=A1ek=20=C5=98ezn=C3=AD=C4=8Dek?=
 <246254@mail.muni.cz>
Date: Thu, 15 Feb 2024 15:12:31 +0100
Subject: [PATCH] feat: add low-level Ceph RBD manipulation scripts

---
 ci/ceph-accessible.sh                | 14 ++++++++++++++
 ci/ceph-rbd-image-clone.sh           | 31 +++++++++++++++++++++++++++++++
 ci/ceph-rbd-image-copy.sh            | 28 ++++++++++++++++++++++++++++
 ci/ceph-rbd-image-delete.sh          | 24 ++++++++++++++++++++++++
 ci/ceph-rbd-image-exists.sh          | 21 +++++++++++++++++++++
 ci/ceph-rbd-image-flatten.sh         | 25 +++++++++++++++++++++++++
 ci/ceph-rbd-image-snapshot-exists.sh | 26 ++++++++++++++++++++++++++
 7 files changed, 169 insertions(+)
 create mode 100755 ci/ceph-accessible.sh
 create mode 100755 ci/ceph-rbd-image-clone.sh
 create mode 100755 ci/ceph-rbd-image-copy.sh
 create mode 100755 ci/ceph-rbd-image-delete.sh
 create mode 100755 ci/ceph-rbd-image-exists.sh
 create mode 100755 ci/ceph-rbd-image-flatten.sh
 create mode 100755 ci/ceph-rbd-image-snapshot-exists.sh

diff --git a/ci/ceph-accessible.sh b/ci/ceph-accessible.sh
new file mode 100755
index 0000000..0ac2b8f
--- /dev/null
+++ b/ci/ceph-accessible.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+# ceph-accessible.sh
+# returns 0 if the Ceph cluster is reachable with the configured credentials
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.migrator"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
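+# probe the cluster with "ceph status"; output is discarded, only the exit status matters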
+ceph --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" status &>/dev/null
diff --git a/ci/ceph-rbd-image-clone.sh b/ci/ceph-rbd-image-clone.sh
new file mode 100755
index 0000000..60ee872
--- /dev/null
+++ b/ci/ceph-rbd-image-clone.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+# ceph-rbd-image-clone.sh <ceph-src-pool-name> <ostack-src-volume-id> <ostack-src-snapshot-id> <ceph-dst-pool-name> <dst-ceph-rbd-image-name>
+# returns 0 if RBD clone succeeds
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.cinder"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
+CEPH_SRC_POOL="$1"
+OSTACK_SRC_VOLUME_ID="$2"
+OSTACK_SRC_SNAPSHOT_ID="$3"
+CEPH_DST_POOL="$4"
+CEPH_DST_RBD_IMAGE_NAME="$5"
+
+test -n "${CEPH_SRC_POOL}"
+test -n "${OSTACK_SRC_VOLUME_ID}"
+test -n "${OSTACK_SRC_SNAPSHOT_ID}"
+test -n "${CEPH_DST_POOL}"
+test -n "${CEPH_DST_RBD_IMAGE_NAME}"
+
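+# resolve the OpenStack volume and snapshot IDs to the backing RBD image and snapshot names (usually "volume-<id>" and "snapshot-<id>")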
+SRC_RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_SRC_POOL}" | grep -E "^(volume.)?${OSTACK_SRC_VOLUME_ID}$")"
+SRC_SNAPSHOT_NAME="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" snap ls "${CEPH_SRC_POOL}/${SRC_RBD_IMAGE}" | grep -Eo "(snapshot.)?${OSTACK_SRC_SNAPSHOT_ID}")"
+
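+# create a copy-on-write clone of the source snapshot in the destination pool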
+rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" clone "${CEPH_SRC_POOL}/${SRC_RBD_IMAGE}@${SRC_SNAPSHOT_NAME}" "${CEPH_DST_POOL}/${CEPH_DST_RBD_IMAGE_NAME}"
+
diff --git a/ci/ceph-rbd-image-copy.sh b/ci/ceph-rbd-image-copy.sh
new file mode 100755
index 0000000..66870ff
--- /dev/null
+++ b/ci/ceph-rbd-image-copy.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# ceph-rbd-image-copy.sh <ceph-src-pool-name> <ostack-src-volume-id> <ceph-dst-pool-name> <dst-ceph-rbd-image-name>
+# returns 0 if RBD copy succeeds
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.cinder"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
+CEPH_SRC_POOL="$1"
+OSTACK_SRC_VOLUME_ID="$2"
+CEPH_DST_POOL="$3"
+CEPH_DST_RBD_IMAGE_NAME="$4"
+
+test -n "${CEPH_SRC_POOL}"
+test -n "${OSTACK_SRC_VOLUME_ID}"
+test -n "${CEPH_DST_POOL}"
+test -n "${CEPH_DST_RBD_IMAGE_NAME}"
+
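+# resolve the OpenStack volume ID to the backing RBD image name (usually "volume-<id>")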
+SRC_RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_SRC_POOL}" | grep -E "^(volume.)?${OSTACK_SRC_VOLUME_ID}$")"
+
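+# copy the image contents into a new image in the destination pool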
+rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" cp "${CEPH_SRC_POOL}/${SRC_RBD_IMAGE}" "${CEPH_DST_POOL}/${CEPH_DST_RBD_IMAGE_NAME}"
+
diff --git a/ci/ceph-rbd-image-delete.sh b/ci/ceph-rbd-image-delete.sh
new file mode 100755
index 0000000..c544127
--- /dev/null
+++ b/ci/ceph-rbd-image-delete.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# ceph-rbd-image-delete.sh <ceph-pool-name> <ostack-volume-id>
+# deletes the matching RBD image if present; returns 0 on success
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.migrator"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
+CEPH_POOL="$1"
+OSTACK_VOLUME_ID="$2"
+
+test -n "${CEPH_POOL}"
+test -n "${OSTACK_VOLUME_ID}"
+
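+# resolve the OpenStack volume ID to the backing RBD image name (usually "volume-<id>"); empty if no match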
+RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_POOL}" | grep -E "^(volume.)?${OSTACK_VOLUME_ID}$" || true)"
+
+if [ -n "${RBD_IMAGE}" ]; then
+    rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" rm "${CEPH_POOL}/${RBD_IMAGE}"
+fi
diff --git a/ci/ceph-rbd-image-exists.sh b/ci/ceph-rbd-image-exists.sh
new file mode 100755
index 0000000..458e1e0
--- /dev/null
+++ b/ci/ceph-rbd-image-exists.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+# ceph-rbd-image-exists.sh <ceph-pool-name> <ostack-volume-id>
+# returns 0 if RBD image exists and prints its name
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.migrator"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
+CEPH_POOL="$1"
+OSTACK_VOLUME_ID="$2"
+
+test -n "${CEPH_POOL}"
+test -n "${OSTACK_VOLUME_ID}"
+
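+# print the name of the matching RBD image; grep's exit status becomes the script's exit status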
+rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_POOL}" | grep -E "^(volume.)?${OSTACK_VOLUME_ID}$"
+
diff --git a/ci/ceph-rbd-image-flatten.sh b/ci/ceph-rbd-image-flatten.sh
new file mode 100755
index 0000000..3aec3ab
--- /dev/null
+++ b/ci/ceph-rbd-image-flatten.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# ceph-rbd-image-flatten.sh <ceph-pool-name> <ostack-volume-id>
+# returns 0 if the RBD image is successfully flattened
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.migrator"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
+CEPH_POOL="$1"
+OSTACK_VOLUME_ID="$2"
+
+test -n "${CEPH_POOL}"
+test -n "${OSTACK_VOLUME_ID}"
+
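+# resolve the OpenStack volume ID to the backing RBD image name (usually "volume-<id>")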
+RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_POOL}" | grep -E "^(volume.)?${OSTACK_VOLUME_ID}$")"
+
+test -n "${RBD_IMAGE}"
+
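+# fill the image with data from its parent so it no longer depends on the parent snapshot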
+rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" flatten "${CEPH_POOL}/${RBD_IMAGE}"
diff --git a/ci/ceph-rbd-image-snapshot-exists.sh b/ci/ceph-rbd-image-snapshot-exists.sh
new file mode 100755
index 0000000..f8d486e
--- /dev/null
+++ b/ci/ceph-rbd-image-snapshot-exists.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# ceph-rbd-image-snapshot-exists.sh <ceph-pool-name> <ostack-volume-id> <ostack-snapshot-id>
+# returns 0 if RBD image snapshot exists and prints its name
+
+set -eo pipefail
+
+CEPH_CLIENT_DIR="/root/migrator"
+CEPH_USER="${CEPH_USER:-"client.cinder"}"
+CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
+CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"
+
+CEPH_POOL="$1"
+OSTACK_VOLUME_ID="$2"
+OSTACK_SNAPSHOT_ID="$3"
+
+test -n "${CEPH_POOL}"
+test -n "${OSTACK_VOLUME_ID}"
+test -n "${OSTACK_SNAPSHOT_ID}"
+
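+# resolve the OpenStack volume ID to the backing RBD image name (usually "volume-<id>")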
+RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_POOL}" | grep -E "^(volume.)?${OSTACK_VOLUME_ID}$")"
+
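+# print the matching snapshot name; grep's exit status becomes the script's exit status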
+rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" snap ls "${CEPH_POOL}/${RBD_IMAGE}" | grep -Eo "(snapshot.)?${OSTACK_SNAPSHOT_ID}"
+
-- 
GitLab