Commit de2338dc authored by František Řezníček

feat: add possibility to migrate standalone volumes

parent 4e4018e0
@@ -4,6 +4,8 @@ import json
import re
import pprint
import time
import os
import os.path
import xmltodict
import paramiko
@@ -477,3 +479,192 @@ def get_server_floating_ip_port(ostack_connection, server):
if str(i_port_ip.get('ip_address')).startswith(i_ip_prefix):
return i_port
return None
def get_server_block_device_mapping(args, server_volume_attachment, server_volume, server_root_device_name):
""" return server block device mapping item """
return {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
'volume_attachment_id': server_volume_attachment.id,
'volume_id': server_volume.id,
'ceph_pool_name': args.source_ceph_cinder_pool_name,
'ceph_rbd_image_name': server_volume.id},
'destination': {'volume_size': server_volume.size,
'volume_name': get_migrated_resource_name(args, server_volume.name),
'volume_description': server_volume.description,
'volume_id': None,
'ceph_pool_name': args.destination_ceph_cinder_pool_name,
'device_name': os.path.basename(server_volume_attachment.device),
'volume_bootable': server_root_device_name == server_volume_attachment.device}}
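The dictionary returned above is the contract between the planning code in main() and migrate_rbd_image() below. A purely illustrative example of a single mapping item follows; the IDs and sizes are hypothetical, and the pool names are the ones used in the comments of this commit:

# Illustrative only: shape of one block-device-mapping item (hypothetical values).
example_mapping = {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                              'volume_attachment_id': 'hypothetical-attachment-uuid',
                              'volume_id': '0069e95e-e805-44ff-bab5-872424312ff6',
                              'ceph_pool_name': 'prod-ephemeral-vms',
                              'ceph_rbd_image_name': '0069e95e-e805-44ff-bab5-872424312ff6'},
                   'destination': {'volume_size': 20,
                                   'volume_name': 'migrated-data-volume',
                                   'volume_description': 'data volume, g1-to-g2-migrated',
                                   'volume_id': None,  # filled in once the G2 volume is created
                                   'ceph_pool_name': 'cloud-cinder-volumes-prod-brno',
                                   'device_name': 'vdb',
                                   'volume_bootable': False}}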
def migrate_rbd_image(args, server_block_device_mapping):
""" migrate G1 ceph RBD image to G2 ceph """
## G1: detect existing G1 RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
source_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
server_block_device_mapping['source']['ceph_rbd_image_name'])
log_or_assert(args, "G.1 Source OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, "G.1 Source OpenStack VM RBD image exists - single image returned",
source_server_rbd_images and len(source_server_rbd_images) == 1, locals())
source_server_rbd_image = source_server_rbd_images[0]
## G2: find volume
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
destination_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
server_block_device_mapping['destination']['volume_id'])
log_or_assert(args, "G.2 Destination OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, "G.2 Destination OpenStack VM RBD image exists - single image returned",
destination_server_rbd_images and len(destination_server_rbd_images) == 1, locals())
destination_server_rbd_image = destination_server_rbd_images[0]
## G1: create RBD image protected snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
source_rbd_image_snapshot_name = f"g1-g2-migration-{source_server_rbd_image}"
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.3 Source OpenStack VM RBD image has non-colliding snapshot " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode != 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_create(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.4 Source OpenStack VM RBD image snapshot created " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.5 Source OpenStack VM RBD image snapshot exists " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode == 0, locals())
## G2: delete RBD image
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
## G2: confirm volume is deleted
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
stdout, stderr, ecode = ceph_rbd_image_delete(args,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
f"G.6 Destination OpenStack VM RBD image deletion succeeded ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
f"G.7 Destination OpenStack VM RBD image does not exist ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode != 0, locals())
## G1: clone from snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
source_rbd_cloned_image_name = f"g1-g2-migration-{source_server_rbd_image}"
stdout, stderr, ecode = ceph_rbd_image_clone(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
"G.8 Source OpenStack VM RBD image cloned succesfully " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name} -> {server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.9 Source OpenStack VM cloned RBD image exists ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
## G1: flatten cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
stdout, stderr, ecode = ceph_rbd_image_flatten(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.10 Source OpenStack VM cloned RBD image flatten successfully ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
## G1->G2: copy RBD image to target pool
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
stdout, stderr, ecode = ceph_rbd_image_copy(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
"G.11 Source OpenStack VM RBD image copied G1 -> G2 succesfully" \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name} -> {server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image}",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
f"G.12 Destination OpenStack VM RBD image exists ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode == 0, locals())
## G1: delete cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
stdout, stderr, ecode = ceph_rbd_image_delete(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.13 Source OpenStack VM RBD cloned image deletion succeeded ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.14 Source OpenStack VM cloned RBD image does not exist anymore ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode != 0, locals())
## G1: remove created snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.15 Source OpenStack VM RBD image snapshot still exists " \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_delete(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.16 Source OpenStack VM RBD image snapshot deletion succeeeded " \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.17 Source OpenStack VM RBD image snapshot does not exist anymore " \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode != 0, locals())
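The ceph_rbd_image_* helpers called throughout migrate_rbd_image() correspond to the shell scripts referenced in the comments (~/migrator/ceph-rbd-image-*.sh) and return a (stdout, stderr, exit code) triple. Their actual implementation lives elsewhere in lib.py and may run the scripts remotely (note the paramiko import at the top of this file); the following is only a local-execution sketch under those assumptions:

import os
import subprocess

def ceph_rbd_image_exists_sketch(args, pool_name, image_name):
    """Sketch: run the existence-check script, return (stdout lines, stderr, exit code)."""
    cmd = [os.path.expanduser('~/migrator/ceph-rbd-image-exists.sh'), pool_name, image_name]
    env = dict(os.environ, CEPH_USER='client.cinder')  # user taken from the comments above
    proc = subprocess.run(cmd, env=env, capture_output=True, text=True, check=False)
    # Callers such as G.1/G.2 expect a list of matching image names plus the exit code.
    return proc.stdout.splitlines(), proc.stderr, proc.returncode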
@@ -18,6 +18,7 @@ import logging
import math
import os
import os.path
import pprint
import sys
import lib
@@ -213,18 +214,8 @@ def main(args):
# populate i_server_block_device_mappings
for i_source_server_volume_attachment in i_source_server_volume_attachments:
i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
i_server_block_device_mappings.append({'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
'volume_attachment_id': i_source_server_volume_attachment.id,
'volume_id': i_server_volume.id,
'ceph_pool_name': args.source_ceph_cinder_pool_name,
'ceph_rbd_image_name': i_server_volume.id},
'destination': {'volume_size': i_server_volume.size,
'volume_name': lib.get_migrated_resource_name(args, i_server_volume.name),
'volume_description': i_server_volume.description,
'volume_id': None,
'ceph_pool_name': args.destination_ceph_cinder_pool_name,
'device_name': os.path.basename(i_source_server_volume_attachment.device),
'volume_bootable': i_source_server_root_device_name == i_source_server_volume_attachment.device}})
i_server_block_device_mappings.append(lib.get_server_block_device_mapping(args, i_source_server_volume_attachment,
i_server_volume, i_source_server_root_device_name))
else:
args.logger.info("F.22 Source OpenStack server - none of attached volumes is attached as the root partition. Seeking for root partition RBD image")
@@ -245,6 +236,7 @@ def main(args):
# populate i_server_block_device_mappings
## initial disk
i_server_block_device_mappings.append({'source': {'block_storage_type': 'ceph-rbd-image',
'volume_id': i_source_ceph_ephemeral_rbd_image,
'ceph_pool_name': args.source_ceph_ephemeral_pool_name,
'ceph_rbd_image_name': i_source_ceph_ephemeral_rbd_image,
'ceph_rbd_image_size': i_source_ceph_ephemeral_rbd_image_size},
@@ -259,35 +251,20 @@ def main(args):
## other disks attached to VM
for i_source_server_volume_attachment in i_source_server_volume_attachments:
i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
i_server_block_device_mappings.append({'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
'volume_attachment_id': i_source_server_volume_attachment.id,
'volume_id': i_server_volume.id,
'ceph_pool_name': args.source_ceph_cinder_pool_name,
'ceph_rbd_image_name': i_server_volume.id},
'destination': {'volume_size': i_server_volume.size,
'volume_name': lib.get_migrated_resource_name(args, i_server_volume.name),
'volume_description': i_server_volume.description,
'volume_id': None,
'ceph_pool_name': args.destination_ceph_cinder_pool_name,
'device_name': os.path.basename(i_source_server_volume_attachment.device),
'volume_bootable': i_source_server_root_device_name == i_source_server_volume_attachment.device}})
i_server_block_device_mappings.append(lib.get_server_block_device_mapping(args, i_source_server_volume_attachment,
i_server_volume, i_source_server_root_device_name))
lib.log_or_assert(args, "F.26 Source OpenStack server - root partition detected",
i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['source'])
lib.log_or_assert(args, "F.27 Destination OpenStack server - root partition details generated",
i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['destination'])
#pprint.pprint(i_server_block_device_mappings)
#wait_for_keypress()
# volume creation in destination cloud
for i_destination_server_block_device_mapping in i_server_block_device_mappings:
i_new_volume_args = {'name': i_destination_server_block_device_mapping['destination']['volume_name'],
'size': i_destination_server_block_device_mapping['destination']['volume_size'],
'description': f"{i_destination_server_block_device_mapping['destination']['volume_description']}, g1-to-g2-migrated"}
'description': f"{i_destination_server_block_device_mapping['destination']['volume_description']}, g1-to-g2-migrated(g1-id:{i_destination_server_block_device_mapping['source']['volume_id']})"}
# TO BE REVISED: this seems to be the only way how to create bootable volume using openstacksdk
if i_destination_server_block_device_mapping['destination']['volume_bootable']:
i_new_volume_args['imageRef'] = destination_image.id
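The "TO BE REVISED" comment above refers to passing imageRef through block_storage.create_volume(): the key is forwarded to the Cinder create request, which builds the volume from that Glance image and leaves it marked bootable. A minimal sketch of the pattern (volume name, size, and description are hypothetical; destination_image and destination_project_conn come from the surrounding code):

volume_args = {'name': 'migrated-root-volume',  # hypothetical
               'size': 20,
               'description': 'root volume, g1-to-g2-migrated',
               'imageRef': destination_image.id}  # makes the volume bootable from the image
volume = destination_project_conn.block_storage.create_volume(**volume_args)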
@@ -312,161 +289,13 @@ def main(args):
# volume migration (browse i_server_block_device_mappings)
for i_server_block_device_mapping in i_server_block_device_mappings:
## G1: detect existing G1 RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
i_source_server_rbd_images, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_server_block_device_mapping['source']['ceph_rbd_image_name'])
lib.log_or_assert(args, "F.41 Source OpenStack VM RBD image exists - query succeeded", i_ecode == 0, locals())
lib.log_or_assert(args, "F.41 Source OpenStack VM RBD image exists - single image returned",
i_source_server_rbd_images and len(i_source_server_rbd_images) == 1, locals())
i_source_server_rbd_image = i_source_server_rbd_images[0]
## G2: find volume
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
i_destination_server_rbd_images, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['destination']['ceph_pool_name'],
i_server_block_device_mapping['destination']['volume_id'])
lib.log_or_assert(args, "F.42 Destination OpenStack VM RBD image exists - query succeeded", i_ecode == 0, locals())
lib.log_or_assert(args, "F.42 Destination OpenStack VM RBD image exists - single image returned",
i_destination_server_rbd_images and len(i_destination_server_rbd_images) == 1, locals())
i_destination_server_rbd_image = i_destination_server_rbd_images[0]
## G1: create RBD image protected snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
i_source_rbd_image_snapshot_name = f"g1-g2-migration-{i_source_server_rbd_image}"
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name)
lib.log_or_assert(args, "F.43 Source OpenStack VM RBD image has non-colliding snapshot " \
f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name})",
i_ecode != 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_create(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name)
lib.log_or_assert(args, "F.44 Source OpenStack VM RBD image snapshot created " \
f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name})",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name)
lib.log_or_assert(args, "F.45 Source OpenStack VM RBD image snapshot exists " \
f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name})",
i_ecode == 0, locals())
## G2: delete RBD image
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
## G2: confirm volume is deleted
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_delete(args,
i_server_block_device_mapping['destination']['ceph_pool_name'],
i_destination_server_rbd_image)
lib.log_or_assert(args, f"F.46 Destination OpenStack VM RBD image deletion succeeded ({i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['destination']['ceph_pool_name'],
i_destination_server_rbd_image)
lib.log_or_assert(args, f"F.47 Destination OpenStack VM RBD image does not exist ({i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",
i_ecode != 0, locals())
## G1: clone from snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
i_source_rbd_cloned_image_name = f"g1-g2-migration-{i_source_server_rbd_image}"
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_clone(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name)
lib.log_or_assert(args, "F.48 Source OpenStack VM RBD image cloned succesfully " \
f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name} -> {i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name)
lib.log_or_assert(args, f"F.49 Source OpenStack VM cloned RBD image exists ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
i_ecode == 0, locals())
## G1: flatten cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_flatten(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name)
lib.log_or_assert(args, f"F.50 Source OpenStack VM cloned RBD image flatten successfully ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
i_ecode == 0, locals())
## G1->G2: copy RBD image to target pool
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_copy(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name,
i_server_block_device_mapping['destination']['ceph_pool_name'],
i_destination_server_rbd_image)
lib.log_or_assert(args, "F.51 Source OpenStack VM RBD image copied G1 -> G2 succesfully" \
f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name} -> {i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image}",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['destination']['ceph_pool_name'],
i_destination_server_rbd_image)
lib.log_or_assert(args, f"F.52 Destination OpenStack VM RBD image exists ({i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",
i_ecode == 0, locals())
## G1: delete cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_delete(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name)
lib.log_or_assert(args, f"F.53 Source OpenStack VM RBD cloned image deletion succeeded ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name)
lib.log_or_assert(args, f"F.54 Source OpenStack VM cloned RBD image does not exist anymore ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
i_ecode != 0, locals())
## G1: remove created snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name)
lib.log_or_assert(args, "F.55 Source OpenStack VM RBD image snapshot still exists " \
f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name}",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_delete(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name)
lib.log_or_assert(args, "F.56 Source OpenStack VM RBD image snapshot deletion succeeeded " \
f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name}",
i_ecode == 0, locals())
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_server_rbd_image,
i_source_rbd_image_snapshot_name)
lib.log_or_assert(args, "F.57 Source OpenStack VM RBD image snapshot does not exist anymore " \
f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name}",
i_ecode != 0, locals())
lib.migrate_rbd_image(args, i_server_block_device_mapping)
# start server in source cloud, wait for it to become 'ACTIVE' again
if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status:
if i_source_server_detail.status == 'ACTIVE':
source_project_conn.compute.start_server(i_source_server_detail)
lib.log_or_assert(args, "F.49 Source OpenStack VM server started back",
lib.log_or_assert(args, "F.34 Source OpenStack VM server started back",
lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'ACTIVE') == "ACTIVE",
locals())
@@ -484,16 +313,16 @@ def main(args):
'boot_volume': i_server_block_device_mappings[0]['destination']['volume_id'],
'key_name': i_destination_server_keypair["name"],
'networks': [ {'uuid': i_network.id} for i_network in i_destination_server_networks ]}
lib.log_or_assert(args, "F.60 Destination OpenStack server arguments are generated with valid block-device-mapping",
lib.log_or_assert(args, "F.35 Destination OpenStack server arguments are generated with valid block-device-mapping",
i_destination_server_args['block_device_mapping_v2'], locals())
lib.log_or_assert(args, "F.60 Destination OpenStack server arguments are generated with valid network configuration",
lib.log_or_assert(args, "F.36 Destination OpenStack server arguments are generated with valid network configuration",
i_destination_server_args['networks'], locals())
#pprint.pprint(i_destination_server_args)
i_destination_server = destination_project_conn.compute.create_server(**i_destination_server_args)
lib.log_or_assert(args, "F.61 Destination OpenStack server is created", i_destination_server, locals())
lib.log_or_assert(args, "F.37 Destination OpenStack server is created", i_destination_server, locals())
i_destination_server = destination_project_conn.compute.wait_for_server(i_destination_server)
lib.log_or_assert(args, "F.62 Destination OpenStack server got ACTIVE",
lib.log_or_assert(args, "F.38 Destination OpenStack server got ACTIVE",
i_destination_server.status == 'ACTIVE', locals())
# add security groups to the destination server (if missing)
@@ -503,12 +332,42 @@ def main(args):
if i_source_server_has_fip:
# add FIP as source VM has it
i_destination_server_fip = destination_project_conn.network.create_ip(floating_network_id=destination_fip_network.id)
lib.log_or_assert(args, "F.63 Destination OpenStack server FIP is created", i_destination_server_fip, locals())
lib.log_or_assert(args, "F.39 Destination OpenStack server FIP is created", i_destination_server_fip, locals())
i_destination_server_port = lib.get_server_floating_ip_port(destination_project_conn, i_destination_server)
lib.log_or_assert(args, "F.64 Destination OpenStack server FIP port is detected", i_destination_server_port, locals())
lib.log_or_assert(args, "F.40 Destination OpenStack server FIP port is detected", i_destination_server_port, locals())
destination_project_conn.network.add_ip_to_port(i_destination_server_port, i_destination_server_fip)
args.logger.info(f"F.66 Source OpenStack server name:{i_source_server_detail.name} migrated into destination one name:{i_destination_server.name} id:{i_destination_server.id}")
args.logger.info(f"F.41 Source OpenStack server name:{i_source_server_detail.name} migrated into destination one name:{i_destination_server.name} id:{i_destination_server.id}")
if args.explicit_volume_names:
for i_source_volume_name in args.explicit_volume_names:
i_source_volume = source_project_conn.block_storage.find_volume(i_source_volume_name)
if not i_source_volume:
args.logger.info(f"H.1 Source volume migration skipped as does not exist (name:{i_source_volume_name})")
continue
if i_source_volume.status != 'available':
args.logger.info(f"H.2 Source volume migration skipped as it is not in state available (name:{i_source_volume_name}, state:{i_source_volume.status}). " \
"Note in-use volumes are being migrated in VM server migration part.")
continue
i_dst_volume = destination_project_conn.block_storage.create_volume(name=lib.get_migrated_resource_name(args, i_source_volume.name),
size=i_source_volume.size,
description=f"{i_source_volume.description}, g1-to-g2-migrated(g1-id:{i_source_volume.id})")
lib.log_or_assert(args,
f"H.3 Destination OpenStack volume created (name:{i_dst_volume.name}, id:{i_dst_volume.id})", i_dst_volume)
i_dst_volume_status = lib.wait_for_ostack_volume_status(destination_project_conn, i_dst_volume.id, 'available')
lib.log_or_assert(args,
f"H.4 Destination OpenStack volume available (name:{i_dst_volume.name}, id:{i_dst_volume.id})",
i_dst_volume_status == 'available')
i_volume_mapping = {'source': {'ceph_pool_name': args.source_ceph_cinder_pool_name,
'ceph_rbd_image_name': i_source_volume.id},
'destination': {'ceph_pool_name': args.destination_ceph_cinder_pool_name,
'volume_id': i_dst_volume.id}}
lib.migrate_rbd_image(args, i_volume_mapping)
i_dst_volume_detail = destination_project_conn.block_storage.find_volume(i_dst_volume.id)
lib.log_or_assert(args,
f"H.5 Destination OpenStack volume available (name:{i_dst_volume_detail.name}, id:{i_dst_volume_detail.id})",
i_dst_volume_detail.status == 'available')
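The H.4 check relies on lib.wait_for_ostack_volume_status(), which is not shown in this diff. A minimal polling sketch with the same interface, under the assumption that it simply re-reads the volume until the expected status appears (timeout and interval values are hypothetical):

import time

def wait_for_ostack_volume_status_sketch(ostack_connection, volume_id, expected_status,
                                         timeout=300, interval=10):
    """Sketch: poll the volume until it reaches expected_status or timeout expires."""
    volume_status = None
    deadline = time.time() + timeout
    while time.time() < deadline:
        volume = ostack_connection.block_storage.find_volume(volume_id)
        if volume:
            volume_status = volume.status
            if volume_status == expected_status:
                break
        time.sleep(interval)
    return volume_status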
# main() call (argument parsing)
......@@ -551,7 +410,7 @@ if __name__ == "__main__":
AP.add_argument('--explicit-server-names', default=None, required=False,
help='(Optional) List of explicit server names or IDs to be migrated. Delimiter: comma or space.')
AP.add_argument('--explicit-volume-names', default=None, required=False,
help='(Optional) List of explicit volume names or IDs to be migrated. Delimiter: comma or space.')
help='(Optional) List of explicit volume (names or) IDs to be migrated. Delimiter: comma or space.')
AP.add_argument('--migrate-also-inactive-servers', default=False, required=False, action='store_true',
help='(Optional) Migrate also inactive servers (i.e. PAUSED/SHUTOFF).')
@@ -567,4 +426,5 @@ if __name__ == "__main__":
ARGS = AP.parse_args()
ARGS.logger = logging.getLogger("project-migrator")
ARGS.explicit_server_names = lib.normalize_servers(ARGS.explicit_server_names)
ARGS.explicit_volume_names = lib.normalize_servers(ARGS.explicit_volume_names)
sys.exit(main(ARGS))
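Note that the new --explicit-volume-names value is normalized with the same lib.normalize_servers() helper as the server list. A sketch of the behavior assumed here, i.e. splitting the comma- or space-delimited CLI string into a list (the real helper may differ):

import re

def normalize_servers_sketch(raw_names):
    """Sketch: turn 'vol1,vol2 vol3' into ['vol1', 'vol2', 'vol3']; None passes through."""
    if not raw_names:
        return None
    return [name for name in re.split(r'[\s,]+', raw_names.strip()) if name]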