Commit cf1fe110 authored by František Řezníček

Merge branch 'freznicek-rbd-snaps-migration' into 'master'

feat: allow migration of RBD snapshots

See merge request !18
Parents: aa5882b5 142b5930
Tag: v1.3.0
Pipeline #472892 passed in 30 seconds
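This commit makes RBD snapshot migration opt-in via a new --migrate-volume-snapshots flag (added in the diff below). A hedged sketch of enabling it, assuming the migrator entry point is named project-migrator.py (the script name is not shown on this page; only the flag is confirmed by the diff):

    # hypothetical invocation; only --migrate-volume-snapshots is confirmed by this commit
    python3 project-migrator.py \
        --migrate-volume-snapshots True \
        ...   # remaining source/destination project options elided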
@@ -6,6 +6,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
+## [1.3.0] - 2024-07-08
+### Fixed
+- Added a way to migrate RBD snapshot entities
 ## [1.2.5] - 2024-07-03
 ### Fixed
 - Handle the situation when a group project network is mapped but does not exist
...
@@ -20,6 +20,8 @@ test -n "${OSTACK_SRC_VOLUME_ID}"
 test -n "${CEPH_DST_POOL}"
 test -n "${CEPH_DST_RBD_IMAGE_NAME}"
+test "$#" == "4"
 SRC_RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring=${CEPH_KEYRING} ls ${CEPH_SRC_POOL} | grep -E "^(volume.)?${OSTACK_SRC_VOLUME_ID}$")"
 rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring=${CEPH_KEYRING} cp ${CEPH_SRC_POOL}/${SRC_RBD_IMAGE} ${CEPH_DST_POOL}/${CEPH_DST_RBD_IMAGE_NAME}
...
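Because the scripts run under set -eo pipefail, the newly added argument-count guard catches a surplus argument that the existing test -n checks would let through, aborting the copy immediately. An illustrative mis-invocation (pool and image names are made up):

    $ ./ceph-rbd-image-copy.sh pool-a 0a1b2c3d pool-b volume-0a1b2c3d extra   # 5 arguments
    $ echo $?
    1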
#!/usr/bin/env bash
# ceph-rbd-image-deepcopy.sh <ceph-src-pool-name> <ostack-src-volume-id> <ceph-dst-pool-name> <dst-ceph-rbd-image-name>
# returns 0 if the RBD deep copy succeeds
set -eo pipefail

# Ceph client credentials and configuration used on the migrator host
CEPH_CLIENT_DIR="/root/migrator"
CEPH_USER="${CEPH_USER:-"client.cinder"}"
CEPH_KEYRING="${CEPH_CLIENT_DIR}/${CEPH_USER}.keyring"
CEPH_CONFIG="${CEPH_CLIENT_DIR}/ceph.conf"

# positional arguments
CEPH_SRC_POOL="$1"
OSTACK_SRC_VOLUME_ID="$2"
CEPH_DST_POOL="$3"
CEPH_DST_RBD_IMAGE_NAME="$4"

# abort early (set -e) on missing or surplus arguments
test -n "${CEPH_SRC_POOL}"
test -n "${OSTACK_SRC_VOLUME_ID}"
test -n "${CEPH_DST_POOL}"
test -n "${CEPH_DST_RBD_IMAGE_NAME}"
test "$#" == "4"

# resolve the source image name: either the bare volume ID or a "volume<sep>"
# prefixed name (the unescaped '.' in the pattern matches the separator)
SRC_RBD_IMAGE="$(rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" ls "${CEPH_SRC_POOL}" | grep -E "^(volume.)?${OSTACK_SRC_VOLUME_ID}$")"

# deep copy preserves the image's snapshots, unlike a plain "rbd cp"
rbd --conf="${CEPH_CONFIG}" --name "${CEPH_USER}" --keyring="${CEPH_KEYRING}" deep cp "${CEPH_SRC_POOL}/${SRC_RBD_IMAGE}" "${CEPH_DST_POOL}/${CEPH_DST_RBD_IMAGE_NAME}"
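A minimal usage sketch for the new script, following its own header comment (the pool names and volume ID below are illustrative):

    # deep-copy a Cinder volume image, including its snapshots, between pools
    CEPH_USER=client.cinder ./ceph-rbd-image-deepcopy.sh \
        prod-cinder-pool 0a1b2c3d-0000-0000-0000-000000000000 \
        new-cinder-pool volume-0a1b2c3d-0000-0000-0000-000000000000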
@@ -79,7 +79,8 @@ def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_
 def ceph_rbd_image_copy(args, src_pool_name, src_rbd_image_name, dst_pool_name, dst_rbd_image_name):
     """ copy RBD image {src_pool_name}/{src_rbd_image_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
     ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
-    script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-copy.sh')
+    script_path = os.path.join(args.ceph_migrator_host_base_dir,
+                               'ceph-rbd-image-deepcopy.sh' if args.migrate_volume_snapshots else 'ceph-rbd-image-copy.sh')
     cmd = f"CEPH_USER={ceph_client_name} {script_path} {src_pool_name} {src_rbd_image_name} {dst_pool_name} {dst_rbd_image_name}"
     stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                             args.ceph_migrator_user,
...
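The behavioural difference between the two wrapped scripts comes down to the rbd subcommand they call: rbd cp copies only the image data, while rbd deep cp also carries the image's snapshots across (pool and image names below are illustrative):

    rbd cp      src-pool/volume-<id> dst-pool/volume-<id>   # image data only
    rbd deep cp src-pool/volume-<id> dst-pool/volume-<id>   # image data plus its snapshots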
@@ -357,6 +357,9 @@ if __name__ == "__main__":
     AP.add_argument('--migrate-reuse-already-migrated-volumes', default=False, required=False,
                     choices=["True", "true", "False", "false"],
                     help='(Optional) Reuse matching already migrated volumes when migration steps failed after volume transfer (step G17).')
+    AP.add_argument('--migrate-volume-snapshots', default=False, required=False,
+                    choices=["True", "true", "False", "false"],
+                    help='(Optional) Migrate OpenStack volume snapshots.')
     AP.add_argument('--validation-a-source-server-id', default=None, required=True,
                     help='For validation, any server ID from the source OpenStack project')
...
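Like the neighbouring flags, the new option accepts the strings "True"/"true"/"False"/"false" rather than a boolean, so the tool presumably normalizes the value before ceph_rbd_image_copy tests its truthiness (a raw "False" string would otherwise be truthy). A minimal sketch of such a conversion; the str2bool helper is hypothetical, not part of this commit:

    def str2bool(value):
        # default=False arrives as a bool; CLI values arrive as strings
        return value if isinstance(value, bool) else value.lower() == "true"

    ARGS = AP.parse_args()
    ARGS.migrate_volume_snapshots = str2bool(ARGS.migrate_volume_snapshots)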