Commit 5c071e90 authored by Josef Smrčka

Merge branch 'jsmrcka-migrations' into 'master'

feat: change volume migration procedure

See merge request !31
parents d826259b 970bb101
@@ -6,12 +6,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [1.7.0] - 2024-10-04
### Changed
- Refactored the `migrate_rbd_images` function in `project-migrator`. The change adds a limited ability to specify
the order of steps in the volume migration procedure: the original state of the related VM can be restored either
at the end of the procedure or in the middle, right after the volume snapshots are created.
## [1.6.1] - 2024-09-20
### Fixed
- Fixed mapping of a single flavor to the correct one.
## [1.6.0] - 2024-08-05
### Added
- Added mapping for csirtmu.* flavors from g1; they are mapped to g2 flavors.
## [1.5.5] - 2024-08-05
@@ -25,7 +31,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [1.5.3] - 2024-08-05
### Changed
- Changed the generate-data-for-cummunication script: added the possibility to specify the migration date.
- This change is reflected in .gitlab-ci.yml
## [1.5.2] - 2024-07-25
### Fixed
@@ -128,29 +128,16 @@ def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_sn
return stdout.splitlines(), stderr, ecode
def migrate_rbd_image(args, server_block_device_mapping):
""" migrate source (G1) ceph RBD image to destination (G2) ceph """
## G1: detect existing G1 RBD image
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
source_server_rbd_images, _, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
server_block_device_mapping['source']['ceph_rbd_image_name'])
log_or_assert(args, "G.01 Source OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, "G.01 Source OpenStack VM RBD image exists - single image returned",
def get_ceph_rbd_image(args, pool_name, rbd_image_name, log_prefix):
""" CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh <pool_name> <rbd_image_name> """
source_server_rbd_images, _, ecode = ceph_rbd_image_exists(args, pool_name, rbd_image_name)
log_or_assert(args, f"{log_prefix} OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, f"{log_prefix} OpenStack VM RBD image exists - single image returned",
source_server_rbd_images and len(source_server_rbd_images) == 1, locals())
source_server_rbd_image = source_server_rbd_images[0]
## G2: find volume
# CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
destination_server_rbd_images, _, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
server_block_device_mapping['destination']['volume_id'])
log_or_assert(args, "G.02 Destination OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, "G.02 Destination OpenStack VM RBD image exists - single image returned",
destination_server_rbd_images and len(destination_server_rbd_images) == 1, locals())
destination_server_rbd_image = destination_server_rbd_images[0]
return source_server_rbd_images[0]
def create_source_rbd_image_snapshot(args, server_block_device_mapping, source_server_rbd_image):
## G1: create RBD image protected snapshot
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
@@ -182,7 +169,10 @@ def migrate_rbd_image(args, server_block_device_mapping):
"G.05 Source OpenStack VM RBD image snapshot exists "
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode == 0, locals())
return source_rbd_image_snapshot_name
def delete_destination_rbd_image(args, server_block_device_mapping, destination_server_rbd_image):
## G2: delete RBD image
# CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
## G2: confirm volume is deleted
@@ -200,6 +190,8 @@ def migrate_rbd_image(args, server_block_device_mapping):
f"G.07 Destination OpenStack VM RBD image does not exist ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode != 0, locals())
def clone_source_rbd_image_snapshot(args, server_block_device_mapping, source_server_rbd_image, source_rbd_image_snapshot_name):
## G1: clone from snapshot
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
@@ -221,7 +213,10 @@ def migrate_rbd_image(args, server_block_device_mapping):
log_or_assert(args,
f"G.09 Source OpenStack VM cloned RBD image exists ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
return source_rbd_cloned_image_name
def flatten_source_rbd_image_snapshot_clone(args, server_block_device_mapping, source_rbd_cloned_image_name):
## G1: flatten cloned RBD image
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
_, _, ecode = ceph_rbd_image_flatten(args,
@@ -231,6 +226,8 @@ def migrate_rbd_image(args, server_block_device_mapping):
f"G.10 Source OpenStack VM cloned RBD image flatten successfully ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
def copy_source_rbd_image_snapshot_clone_to_destination_pool(args, server_block_device_mapping, source_rbd_cloned_image_name, destination_server_rbd_image):
## G1->G2: copy RBD image to target pool
# CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
# CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
@@ -251,6 +248,8 @@ def migrate_rbd_image(args, server_block_device_mapping):
f"G.12 Destination OpenStack VM RBD image exists ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode == 0, locals())
def delete_source_rbd_image_snapshot_clone(args, server_block_device_mapping, source_rbd_cloned_image_name):
## G1: delete cloned RBD image
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
_, _, ecode = ceph_rbd_image_delete(args,
@@ -266,6 +265,8 @@ def migrate_rbd_image(args, server_block_device_mapping):
f"G.14 Source OpenStack VM cloned RBD image does not exist anymore ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode != 0, locals())
def delete_source_rbd_image_snapshot(args, server_block_device_mapping, source_server_rbd_image, source_rbd_image_snapshot_name):
## G1: remove created snapshot
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
# CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
@@ -294,3 +295,69 @@ def migrate_rbd_image(args, server_block_device_mapping):
"G.17 Source OpenStack VM RBD image snapshot does not exist anymore "
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode != 0, locals())
def migrate_rbd_images(args, server_block_device_mappings, post_rbd_snap_callback=None):
""" migrate source (G1) ceph RBD images to destination (G2) ceph """
block_device_migration_mappings = []
for server_block_device_mapping in server_block_device_mappings:
## G1: detect existing RBD image
source_server_rbd_image = get_ceph_rbd_image(args,
server_block_device_mapping['source']['ceph_pool_name'],
server_block_device_mapping['source']['ceph_rbd_image_name'],
"G.01 Source")
## G2: detect existing RBD image
destination_server_rbd_image = get_ceph_rbd_image(args,
server_block_device_mapping['destination']['ceph_pool_name'],
server_block_device_mapping['destination']['volume_id'],
"G.02 Destination")
source_rbd_image_snapshot_name = create_source_rbd_image_snapshot(args,
server_block_device_mapping,
source_server_rbd_image)
block_device_migration_mappings.append({
'server_block_device_mapping': server_block_device_mapping,
'source_server_rbd_image': source_server_rbd_image,
'destination_server_rbd_image': destination_server_rbd_image,
'source_rbd_image_snapshot_name': source_rbd_image_snapshot_name
})
# if defined, execute the callback
if post_rbd_snap_callback:
post_rbd_snap_callback['func'](**post_rbd_snap_callback['args'])
for block_device_migration_mapping in block_device_migration_mappings:
server_block_device_mapping = block_device_migration_mapping['server_block_device_mapping']
source_server_rbd_image = block_device_migration_mapping['source_server_rbd_image']
destination_server_rbd_image = block_device_migration_mapping['destination_server_rbd_image']
source_rbd_image_snapshot_name = block_device_migration_mapping['source_rbd_image_snapshot_name']
delete_destination_rbd_image(args,
server_block_device_mapping,
destination_server_rbd_image)
source_rbd_cloned_image_name = clone_source_rbd_image_snapshot(args,
server_block_device_mapping,
source_server_rbd_image,
source_rbd_image_snapshot_name)
flatten_source_rbd_image_snapshot_clone(args,
server_block_device_mapping,
source_rbd_cloned_image_name)
copy_source_rbd_image_snapshot_clone_to_destination_pool(args,
server_block_device_mapping,
source_rbd_cloned_image_name,
destination_server_rbd_image)
delete_source_rbd_image_snapshot_clone(args,
server_block_device_mapping,
source_rbd_cloned_image_name)
delete_source_rbd_image_snapshot(args,
server_block_device_mapping,
source_server_rbd_image,
source_rbd_image_snapshot_name)
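A hedged usage sketch of the `post_rbd_snap_callback` contract: the dict carries the callable under 'func' and its keyword arguments under 'args'. Here `do_resume_vm` and `mappings` are hypothetical placeholders; the real caller passes `olib.restore_source_server_status`, as shown in main() below.
def do_resume_vm(vm_name):
    # hypothetical callback, invoked once all volume snapshots exist
    print(f"resuming {vm_name} right after snapshot creation")
migrate_rbd_images(args, mappings,  # 'args'/'mappings' as prepared by the caller
                   post_rbd_snap_callback={'func': do_resume_vm,
                                           'args': {'vm_name': 'example-vm'}})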
@@ -874,3 +874,15 @@ def compare_quota_values(value_1, value_2):
if val_1 < val_2:
return -1
return 0
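Only the final branches of compare_quota_values are visible in this hunk; the following is a minimal self-contained sketch of such a three-way comparison, where the head is an assumption rather than the migrator's actual code.
def compare_quota_values_sketch(value_1, value_2):
    # assumed head: normalize both values to integers before comparing
    val_1, val_2 = int(value_1), int(value_2)
    if val_1 > val_2:
        return 1
    if val_1 < val_2:
        return -1
    return 0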
def restore_source_server_status(args, source_project_conn, source_server_detail, source_server):
""" start server in source cloud (if necessary), wait for VM being back in the same state as at the beginning """
if source_server_detail.status != source_project_conn.compute.find_server(source_server.id).status and \
not args.source_servers_left_shutoff:
if source_server_detail.status == 'ACTIVE':
source_project_conn.compute.start_server(source_server_detail)
args.logger.info(f"F.34 Source OpenStack VM server (name:{source_server_detail.name}) requested to start")
else:
args.logger.warning(f"F.34 Source OpenStack VM server (name:{source_server_detail.name}) is not in expected state, "
f"but migrator does not know how to move to {source_server_detail.status} state")
@@ -205,19 +205,26 @@ def main(args):
lib.log_or_assert(args, f"F.33 Source OpenStack VM server (name:{i_source_server_detail.name}) stopped (reached SHUTOFF state)",
lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'SHUTOFF') == "SHUTOFF")
# volume migration (browse i_server_block_device_mappings)
for i_server_block_device_mapping in i_server_block_device_mappings:
clib.migrate_rbd_image(args, i_server_block_device_mapping)
# start server in source cloud (if necessary), wait for VM being back in the same state as at the beginning
if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status and \
not args.source_servers_left_shutoff:
if i_source_server_detail.status == 'ACTIVE':
source_project_conn.compute.start_server(i_source_server_detail)
args.logger.info(f"F.34 Source OpenStack VM server (name:{i_source_server_detail.name}) requested to start")
else:
args.logger.warning(f"F.34 Source OpenStack VM server (name:{i_source_server_detail.name}) is not in expected state, "
f"but migrator does not know how to move to {i_source_server_detail.status} state")
restore_source_server_status_args = {
'args': args,
'source_project_conn': source_project_conn,
'source_server_detail': i_source_server_detail,
'source_server': i_source_server
}
post_rbd_snap_callback = None
if args.block_storage_volume_migration_mode == BLOCK_STORAGE_VOLUME_MIGRATION_MODE_VMON_AFTER_SNAP:
# start server in source cloud (if necessary)
post_rbd_snap_callback = {
'func': olib.restore_source_server_status,
'args': restore_source_server_status_args
}
# volumes migration (browse i_server_block_device_mappings)
clib.migrate_rbd_images(args, i_server_block_device_mappings, post_rbd_snap_callback)
# start server in source cloud (if necessary)
olib.restore_source_server_status(**restore_source_server_status_args)
# start server in destination cloud
i_destination_server = olib.create_dst_server(args,
@@ -298,13 +305,16 @@ def main(args):
'ceph_rbd_image_name': i_source_volume.id},
'destination': {'ceph_pool_name': args.destination_ceph_cinder_pool_name,
'volume_id': i_dst_volume.id}}
clib.migrate_rbd_image(args, i_volume_mapping)
clib.migrate_rbd_images(args, [i_volume_mapping])
i_dst_volume_detail = destination_project_conn.block_storage.find_volume(i_dst_volume.id)
lib.log_or_assert(args,
f"H.05 Destination OpenStack volume available (name:{i_dst_volume_detail.name}, id:{i_dst_volume_detail.id})",
i_dst_volume_detail.status == 'available')
BLOCK_STORAGE_VOLUME_MIGRATION_MODE_VMON_AFTER_SNAP="vmoff-snap-vmon-clone-flatten-copy-cleanup"
BLOCK_STORAGE_VOLUME_MIGRATION_MODE_VMON_AFTER_CLEANUP="vmoff-snap-clone-flatten-copy-cleanup-vmon"
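The two mode strings encode the step order directly in their values; as a hedged illustration (step labels only, not migrator functions), the difference is where the VM restart ('vmon') lands in the pipeline:
# Illustrative only: step orderings implied by the two mode strings above.
STEPS_VMON_AFTER_SNAP = ['vmoff', 'snap', 'vmon', 'clone', 'flatten', 'copy', 'cleanup']
STEPS_VMON_AFTER_CLEANUP = ['vmoff', 'snap', 'clone', 'flatten', 'copy', 'cleanup', 'vmon']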
# main() call (argument parsing)
# -------------------------------------------------------------------------------------------------
if __name__ == "__main__":
@@ -359,6 +369,9 @@ if __name__ == "__main__":
help='(Optional) Reuse matching already migrated volumes when migration steps failed after volume transfer (step G.17).')
AP.add_argument('--migrate-volume-snapshots', default=False, required=False, choices=lib.BOOLEAN_CHOICES,
help='(Optional) Migrate OpenStack volume snapshots.')
AP.add_argument('--block-storage-volume-migration-mode', default=BLOCK_STORAGE_VOLUME_MIGRATION_MODE_VMON_AFTER_SNAP, required=False,
choices=[BLOCK_STORAGE_VOLUME_MIGRATION_MODE_VMON_AFTER_SNAP, BLOCK_STORAGE_VOLUME_MIGRATION_MODE_VMON_AFTER_CLEANUP],
help='(Optional) Mode that determines the order of steps performed during volume migration (steps G.05-G.17, F.34).')
AP.add_argument('--validation-a-source-server-id', default=None, required=True,
help='For validation any server ID from source OpenStack project')