#!/usr/bin/env python3
"""
OpenStack project multicloud migrator
Usage example:
 * ./project-migrator.py --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc
     --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc
     --project-name meta-cloud-new-openstack
     --validation-a-source-server-id <>
     --ceph-migrator-sshkeyfile $HOME/.ssh/id_rsa.LenovoThinkCentreE73
 * ./project-migrator.py --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc --project-name meta-cloud-new-openstack --validation-a-source-server-id <> --ceph-migrator-sshkeyfile $HOME/.ssh/id_rsa.LenovoThinkCentreE73 --explicit-server-names freznicek-rook-internal-external-20-worker-1
"""
import argparse
import logging
import math
import os
import os.path
import sys

import lib  # project-local helper module (import name assumed from the lib.* calls below)
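
# NOTE: lib.log_or_assert() is assumed to behave roughly like the following sketch
# (hypothetical, for orientation only; the real implementation lives in the helper module):
#
#   def log_or_assert(args, msg, condition, state=None):
#       if condition:
#           args.logger.info(msg)                          # checkpoint passed
#       else:
#           dump_state(args.exception_trace_file, state)   # hypothetical state-dump helper
#           raise AssertionError(msg)                      # abort the migration run
#
# The remaining lib.* helpers are assumed to wrap openstacksdk calls and the
# /root/migrator/ceph-*.sh scripts executed on the migrator host over SSH.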


def main(args):
    """ """

    # connect to source cloud
    source_migrator_openrc = lib.get_openrc(args.source_openrc)
    source_migrator_conn = lib.get_ostack_connection(source_migrator_openrc)
    args.logger.info("A.1 Source OpenStack cloud connected as migrator user")

    # connect to destination cloud
    destination_migrator_openrc = lib.get_openrc(args.destination_openrc)
    destination_migrator_conn = lib.get_ostack_connection(destination_migrator_openrc)
    args.logger.info("A.2 Destination OpenStack cloud connected as migrator user")

    # check project exists in source and destination
    source_project = lib.get_ostack_project(source_migrator_conn, args.project_name)
    lib.log_or_assert(args, "B.1 Source OpenStack cloud project exists", source_project)
    source_project_type = lib.get_ostack_project_type(source_migrator_conn, source_project)
    lib.log_or_assert(args, f"B.2 Source OpenStack cloud project type is {source_project_type}",
                      source_project_type)
    destination_project = lib.get_ostack_project(destination_migrator_conn, args.project_name)
    lib.log_or_assert(args, "B.10 Destination OpenStack cloud project exists", destination_project)
    destination_project_type = lib.get_ostack_project_type(destination_migrator_conn, destination_project)
    lib.log_or_assert(args, f"B.11 Destination OpenStack cloud project type is {destination_project_type}",
                      destination_project_type)
    lib.log_or_assert(args, "B.12 Source and destination project types match",
                      source_project_type == destination_project_type)

    # check user context switching & quotas
    source_project_conn = lib.get_ostack_connection(source_migrator_openrc | {'OS_PROJECT_NAME': source_project.name})
    #source_project_quotas = source_project_conn.get_compute_quotas(source_project.id)
    #lib.log_or_assert(args, f"C.1 Context switching to source OpenStack cloud project succeeded (id:{source_project.id})",
    #                  source_project_quotas and source_project_quotas.id == source_project.id)
    destination_project_conn = lib.get_ostack_connection(destination_migrator_openrc | {'OS_PROJECT_NAME': destination_project.name})
    #destination_project_quotas = destination_project_conn.get_compute_quotas(destination_project.id)
    #lib.log_or_assert(args, f"C.2 Context switching to destination OpenStack cloud project succeeded (id:{destination_project.id})",
    #                  destination_project_quotas and destination_project_quotas.id == destination_project.id)

    # connect to migrator node
    reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
                                                                  args.ceph_migrator_sshkeyfile.name, 'uname -a')
    lib.log_or_assert(args, "D.1 Migrator host is reachable", 'Linux' in reply_stdout and reply_ecode == 0)

    reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
                                                                  args.ceph_migrator_sshkeyfile.name,
                                                                  '/root/migrator/ceph-accessible.sh')
    lib.log_or_assert(args, "D.2 Ceph is available from the migrator host", reply_ecode == 0)

    source_rbd_images = {args.source_ceph_ephemeral_pool_name: None,
                         args.source_ceph_cinder_pool_name: None}
    for i_pool_name in source_rbd_images.keys():
        source_rbd_images[i_pool_name] = lib.ceph_rbd_images_list(args, i_pool_name)
        lib.log_or_assert(args, f"D.3 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name])

    source_keypairs = lib.get_source_keypairs(args)
    lib.log_or_assert(args, "D.4 Source OpenStack cloud keypairs received.", source_keypairs)

    # get source/destination entities in the project
    source_project_servers = lib.get_ostack_project_servers(source_project_conn, source_project)
    args.logger.info("E.1 Source OpenStack cloud servers received")
    lib.assert_entity_ownership(source_project_servers, source_project)
    args.logger.info(f"E.2 Source OpenStack cloud project has {len(source_project_servers)} servers.")

    source_project_flavors = lib.get_ostack_project_flavors(source_project_conn)
    lib.log_or_assert(args, "E.4 Source OpenStack flavor list received", source_project_flavors)

    destination_project_servers = lib.get_ostack_project_servers(destination_project_conn, destination_project)
    args.logger.info("E.10 Destination OpenStack cloud servers received")
    lib.assert_entity_ownership(destination_project_servers, destination_project)
    args.logger.info(f"E.11 Destination OpenStack cloud project has {len(destination_project_servers)} servers.")

    destination_project_flavors = lib.get_ostack_project_flavors(destination_project_conn)
    lib.log_or_assert(args, "E.12 Destination OpenStack flavor list received", destination_project_flavors)

    lib.log_or_assert(args, "E.20 Source OpenStack VM ID validation succeeded",
                      args.validation_a_source_server_id in [i_server.id for i_server in source_project_servers])

    destination_image = destination_project_conn.image.find_image(args.destination_bootable_volume_image_name)
    lib.log_or_assert(args, "E.30 Destination image found and received", destination_image)

    destination_fip_network = destination_project_conn.network.find_network(args.destination_ipv4_external_network)
    lib.log_or_assert(args, "E.31 Destination cloud FIP network detected", destination_fip_network)

    args.logger.info(f"F.0 Source VM servers: {[i_source_server.name for i_source_server in source_project_servers]}")

    for i_source_server in source_project_servers:
        i_source_server_detail = source_project_conn.compute.find_server(i_source_server.id)
        i_source_server_has_fip = lib.server_detect_floating_address(i_source_server_detail)

        if args.explicit_server_names and i_source_server.name not in args.explicit_server_names:
            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to --explicit-server-names={args.explicit_server_names}")
            continue

        if i_source_server_detail.status != 'ACTIVE' and not args.migrate_also_inactive_servers:
            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to VM status {i_source_server_detail.status}. Use --migrate-also-inactive-servers if necessary.")
            continue

        # detect destination VM does not exist
        i_destination_server_detail = destination_project_conn.compute.find_server(lib.get_migrated_resource_name(args, i_source_server_detail.name))
        if i_destination_server_detail:
            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} as equivalent VM exists in destination cloud (name: {i_destination_server_detail.name})")
            continue

        args.logger.info(f"F.1 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, keypair: {i_source_server_detail.key_name}, flavor: {i_source_server_detail.flavor}, sec-groups:{i_source_server_detail.security_groups}, root_device_name: {i_source_server_detail.root_device_name}, block_device_mapping: {i_source_server_detail.block_device_mapping}, attached-volumes: {i_source_server_detail.attached_volumes}")

        # network, subnet detection, TODO: better
        i_source_server_network_names = i_source_server_detail.addresses.keys()
        i_destination_server_networks = []
        for i_source_network_name in i_source_server_network_names:
            i_destination_network_name = lib.get_destination_network(i_source_network_name)
            if not i_destination_network_name and args.destination_group_project_network_name != "":
                # if the network is not mapped, use the network provided via --destination-group-project-network-name
                i_destination_network_name = args.destination_group_project_network_name
            lib.log_or_assert(args, f"F.2 Source to Destination network mapping succeeded ({i_source_network_name}->{i_destination_network_name}). Read --destination-group-project-network-name description for more details", i_destination_network_name)
            i_destination_network = destination_project_conn.network.find_network(i_destination_network_name)
            lib.log_or_assert(args, f"F.3 Destination network exists ({i_destination_network})", i_destination_network)
            i_destination_server_networks.append(i_destination_network)

        # flavor detection
        i_source_server_flavor_name = i_source_server_detail.flavor.name
        i_destination_server_flavor_name = lib.get_destination_flavor(i_source_server_flavor_name)
        lib.log_or_assert(args, f"F.5 Source to Destination flavor mapping succeeded ({i_source_server_flavor_name}->{i_destination_server_flavor_name})",
                          i_destination_server_flavor_name)
        lib.log_or_assert(args, "F.6 Destination OpenStack flavor exists",
                          [i_flavor for i_flavor in destination_project_flavors if i_flavor.name == i_destination_server_flavor_name])

        # keypair detection / creation
        i_source_server_keypair = lib.get_source_keypair(source_keypairs, i_source_server_detail.key_name, i_source_server_detail.user_id)
        lib.log_or_assert(args, f"F.7 Source OpenStack server keypair found ({i_source_server_keypair['name']})", i_source_server_keypair)
        i_destination_server_keypair = None
        if i_destination_server_keypairs := [i_keypair for i_keypair in destination_project_conn.list_keypairs() if i_keypair.name == lib.get_migrated_resource_name(args, i_source_server_detail.key_name)]:
            i_destination_server_keypair = i_destination_server_keypairs[0]
            lib.log_or_assert(args, f"F.8 Destination OpenStack server keypair found already ({i_destination_server_keypair.name})", i_destination_server_keypair)
        else:
            i_destination_server_keypair = lib.create_keypair(args, destination_project_conn, i_source_server_keypair)
            args.logger.info("F.8 Destination OpenStack server keypair created")
        lib.log_or_assert(args, f"F.9 Destination OpenStack server keypair exists ({i_destination_server_keypair.name})", i_destination_server_keypair)

        # security group
        #source_project_security_groups = get_ostack_project_security_groups(source_project_conn, source_project)
        #destination_project_security_groups = get_ostack_project_security_groups(destination_project_conn, destination_project)
        i_destination_server_security_groups = []
        for i_source_server_security_group_name in set([i_sg['name'] for i_sg in i_source_server_detail.security_groups]):
            i_source_server_security_group = source_project_conn.network.find_security_group(i_source_server_security_group_name, project_id=source_project.id)
            i_destination_server_security_group = None
            if i_destination_server_security_group := destination_project_conn.network.find_security_group(
                    lib.get_migrated_resource_name(args, i_source_server_security_group.name),
                    project_id=destination_project.id):
                lib.log_or_assert(args, f"F.10 Destination OpenStack server security group found already ({i_destination_server_security_group.name})",
                                  i_destination_server_security_group)
            else:
                args.logger.info("F.10 Destination OpenStack server matching security group not found, gets created.")
                i_destination_server_security_group = lib.create_security_group(args, destination_project_conn, i_source_server_security_group, destination_project)
                lib.log_or_assert(args, f"F.10 Destination OpenStack server security group created ({i_destination_server_security_group.name})",
                                  i_destination_server_security_group)
            lib.log_or_assert(args, f"F.11 Destination OpenStack server security group exists ({i_destination_server_security_group.name})",
                              i_destination_server_security_group)
            i_destination_server_security_groups.append(i_destination_server_security_group)
        lib.log_or_assert(args, "F.12 Destination OpenStack server - destination security groups exists",
                          i_destination_server_security_groups)

        # volume detection
        i_server_block_device_mappings = []
        # schema: [{}, ...]
        # where {} is the following dict
        # {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image', 'volume_attachment_id': <>, 'volume_id': <>,
        #             'ceph_pool_name': <pool-name>, 'ceph_rbd_image_name': <rbd-image-name>, 'ceph_rbd_image_size': <size-gb>}
        #            OR
        #            {'block_storage_type': 'ceph-rbd-image', 'ceph_pool_name': <pool-name>, 'ceph_rbd_image_name': <rbd-image-name>, 'ceph_rbd_image_size': <size-gb>},
        #  'destination': {'volume_size': <size-gb>, 'volume_id': <vol-id>, 'device_name': <dev-name>, 'volume_bootable': True/False}
        # }
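        # Illustrative example of one populated mapping entry (hypothetical values, using the
        # default pool names and the default 'migrated-' entity prefix from the CLI options below):
        #   {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
        #               'volume_attachment_id': '<attachment-uuid>', 'volume_id': '<volume-uuid>',
        #               'ceph_pool_name': 'prod-cinder-volumes', 'ceph_rbd_image_name': '<volume-uuid>'},
        #    'destination': {'volume_size': 20, 'volume_name': 'migrated-<source-volume-name>',
        #                    'volume_description': '<source-volume-description>', 'volume_id': None,
        #                    'ceph_pool_name': 'cloud-cinder-volumes-prod-brno',
        #                    'device_name': 'vdb', 'volume_bootable': False}}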
        i_source_server_root_device_name = i_source_server_detail.root_device_name
        lib.log_or_assert(args, f"F.20 Source OpenStack server - root device name received ({i_source_server_root_device_name})",
                          i_source_server_root_device_name)
        i_source_server_volume_attachments = tuple(source_project_conn.compute.volume_attachments(i_source_server_detail.id))
        assert_msg = f"F.21 Source OpenStack server - volume attachments received {i_source_server_volume_attachments}"
        #pprint.pprint(i_source_server_volume_attachments)

        i_source_ceph_ephemeral_rbd_image = None
        if i_source_server_root_device_name in [i_source_server_attachment.device for i_source_server_attachment in i_source_server_volume_attachments]:
            args.logger.info("F.22 Source OpenStack server - one of the attached volumes is attached as the root partition")

            # populate i_server_block_device_mappings
            for i_source_server_volume_attachment in i_source_server_volume_attachments:
                i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
                i_server_block_device_mappings.append({'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                                                                  'volume_attachment_id': i_source_server_volume_attachment.id,
                                                                  'volume_id': i_server_volume.id,
                                                                  'ceph_pool_name': args.source_ceph_cinder_pool_name,
                                                                  'ceph_rbd_image_name': i_server_volume.id},
                                                       'destination': {'volume_size': i_server_volume.size,
                                                                       'volume_name': lib.get_migrated_resource_name(args, i_server_volume.name),
                                                                       'volume_description': i_server_volume.description,
                                                                       'volume_id': None,
                                                                       'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                                       'device_name': os.path.basename(i_source_server_volume_attachment.device),
                                                                       'volume_bootable': i_source_server_root_device_name == i_source_server_volume_attachment.device}})
        else:
            args.logger.info("F.22 Source OpenStack server - none of the attached volumes is attached as the root partition. Searching for the root partition RBD image")
            if f"{i_source_server_detail.id}_disk" in source_rbd_images[args.source_ceph_ephemeral_pool_name]:
                i_source_ceph_ephemeral_rbd_image = f"{i_source_server_detail.id}_disk"
                args.logger.info(f"F.23 Source OpenStack server - Root partition found as RBD image {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}")

                # get rbd image info / size
                i_source_ceph_ephemeral_rbd_image_data = lib.ceph_rbd_image_info(args, args.source_ceph_ephemeral_pool_name,
                                                                                 i_source_ceph_ephemeral_rbd_image)
                lib.log_or_assert(args, f"F.24 Source OpenStack ceph RBD image information received {i_source_ceph_ephemeral_rbd_image_data}",
                                  i_source_ceph_ephemeral_rbd_image_data and 'size' in i_source_ceph_ephemeral_rbd_image_data)
                i_source_ceph_ephemeral_rbd_image_size = math.ceil(i_source_ceph_ephemeral_rbd_image_data['size'] / 1024 / 1024 / 1024)
                lib.log_or_assert(args, f"F.25 Source OpenStack ceph RBD image size calculated ({i_source_ceph_ephemeral_rbd_image_size})",
                                  i_source_ceph_ephemeral_rbd_image_size)

                # populate i_server_block_device_mappings
                ## initial disk
                i_server_block_device_mappings.append({'source': {'block_storage_type': 'ceph-rbd-image',
                                                                  'ceph_pool_name': args.source_ceph_ephemeral_pool_name,
                                                                  'ceph_rbd_image_name': i_source_ceph_ephemeral_rbd_image,
                                                                  'ceph_rbd_image_size': i_source_ceph_ephemeral_rbd_image_size},
                                                       'destination': {'volume_size': i_source_ceph_ephemeral_rbd_image_size,
                                                                       'volume_name': lib.get_migrated_resource_name(args, i_source_ceph_ephemeral_rbd_image),
                                                                       'volume_description': f"RBD {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}",
                                                                       'volume_id': None,
                                                                       'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                                       'device_name': os.path.basename(i_source_server_root_device_name),
                                                                       'volume_bootable': True}})
                ## other disks attached to VM
                for i_source_server_volume_attachment in i_source_server_volume_attachments:
                    i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
                    i_server_block_device_mappings.append({'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                                                                      'volume_attachment_id': i_source_server_volume_attachment.id,
                                                                      'volume_id': i_server_volume.id,
                                                                      'ceph_pool_name': args.source_ceph_cinder_pool_name,
                                                                      'ceph_rbd_image_name': i_server_volume.id},
                                                           'destination': {'volume_size': i_server_volume.size,
                                                                           'volume_name': lib.get_migrated_resource_name(args, i_server_volume.name),
                                                                           'volume_description': i_server_volume.description,
                                                                           'volume_id': None,
                                                                           'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                                           'device_name': os.path.basename(i_source_server_volume_attachment.device),
                                                                           'volume_bootable': i_source_server_root_device_name == i_source_server_volume_attachment.device}})

        lib.log_or_assert(args, "F.26 Source OpenStack server - root partition detected",
                          i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['source'])
        lib.log_or_assert(args, "F.27 Destination OpenStack server - root partition details generated",
                          i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['destination'])
        #pprint.pprint(i_server_block_device_mappings)

        # volume creation in destination cloud
        for i_destination_server_block_device_mapping in i_server_block_device_mappings:
            i_new_volume_args = {'name': i_destination_server_block_device_mapping['destination']['volume_name'],
                                 'size': i_destination_server_block_device_mapping['destination']['volume_size'],
                                 'description': f"{i_destination_server_block_device_mapping['destination']['volume_description']}, g1-to-g2-migrated"}
            # TO BE REVISED: this seems to be the only way how to create bootable volume using openstacksdk
            if i_destination_server_block_device_mapping['destination']['volume_bootable']:
                i_new_volume_args['imageRef'] = destination_image.id
            i_new_volume = destination_project_conn.block_storage.create_volume(**i_new_volume_args)
            lib.log_or_assert(args, f"F.29 Destination OpenStack volume created (name:{i_new_volume.name}, id:{i_new_volume.id})", i_new_volume)
            lib.wait_for_ostack_volume_status(destination_project_conn, i_new_volume.id, 'available')
            lib.log_or_assert(args, f"F.30 Destination OpenStack volume available (name:{i_new_volume.name}, id:{i_new_volume.id})",
                              lib.wait_for_ostack_volume_status(destination_project_conn, i_new_volume.id, 'available') == 'available')
            # remember volume ID
            i_destination_server_block_device_mapping['destination']['volume_id'] = i_new_volume.id

        for i_destination_server_block_device_mapping in i_server_block_device_mappings:
            lib.log_or_assert(args, "F.31 Destination OpenStack volume IDs properly stored", i_destination_server_block_device_mapping['destination']['volume_id'])

        # VM stop, wait for SHUTOFF
        if i_source_server_detail.status != 'SHUTOFF':
            source_project_conn.compute.stop_server(i_source_server_detail)
            lib.log_or_assert(args, "F.33 Source OpenStack VM server stopped",
                              lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'SHUTOFF') == "SHUTOFF")

        # volume migration (browse i_server_block_device_mappings)
        for i_server_block_device_mapping in i_server_block_device_mappings:
            ## G1: detect existing G1 RBD image
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
            i_source_server_rbd_images, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
                                                                                      i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                                      i_server_block_device_mapping['source']['ceph_rbd_image_name'])
            lib.log_or_assert(args, "F.41 Source OpenStack VM RBD image exists - query succeeded", i_ecode == 0, locals())
            lib.log_or_assert(args, "F.41 Source OpenStack VM RBD image exists - single image returned",
                              i_source_server_rbd_images and len(i_source_server_rbd_images) == 1, locals())
            i_source_server_rbd_image = i_source_server_rbd_images[0]

            ## G2: find volume
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
            i_destination_server_rbd_images, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
                                                                                           i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                                           i_server_block_device_mapping['destination']['volume_id'])
            lib.log_or_assert(args, "F.42 Destination OpenStack VM RBD image exists - query succeeded", i_ecode == 0, locals())
            lib.log_or_assert(args, "F.42 Destination OpenStack VM RBD image exists - single image returned",
                              i_destination_server_rbd_images and len(i_destination_server_rbd_images) == 1, locals())
            i_destination_server_rbd_image = i_destination_server_rbd_images[0]

            ## G1: create RBD image protected snapshot
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
            i_source_rbd_image_snapshot_name = f"g1-g2-migration-{i_source_server_rbd_image}"
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
                                                                             i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                             i_source_server_rbd_image,
                                                                             i_source_rbd_image_snapshot_name)
            lib.log_or_assert(args, "F.43 Source OpenStack VM RBD image has non-colliding snapshot " \
                              f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name})",
                              i_ecode != 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_create(args,
                                                                             i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                             i_source_server_rbd_image,
                                                                             i_source_rbd_image_snapshot_name)
            lib.log_or_assert(args, "F.44 Source OpenStack VM RBD image snapshot created " \
                              f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name})",
                              i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
                                                                             i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                             i_source_server_rbd_image,
                                                                             i_source_rbd_image_snapshot_name)
            lib.log_or_assert(args, "F.45 Source OpenStack VM RBD image snapshot exists " \
                              f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name})",
                              i_ecode == 0, locals())

            ## G2: delete RBD image
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
            ## G2: confirm volume is deleted
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_delete(args,
                                                                    i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                    i_destination_server_rbd_image)
lib.log_or_assert(args, f"F.46 Destination OpenStack VM RBD image deletion succeeded ({i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
i_server_block_device_mapping['destination']['ceph_pool_name'],
i_destination_server_rbd_image)
lib.log_or_assert(args, f"F.47 Destination OpenStack VM RBD image does not exist ({i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",

            ## G1: clone from snapshot
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            i_source_rbd_cloned_image_name = f"g1-g2-migration-{i_source_server_rbd_image}"
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_clone(args,
                                                                   i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                   i_source_server_rbd_image,
                                                                   i_source_rbd_image_snapshot_name,
                                                                   i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                   i_source_rbd_cloned_image_name)
            lib.log_or_assert(args, "F.48 Source OpenStack VM RBD image cloned successfully " \
                              f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name} -> {i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
                              i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
                                                                    i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                    i_source_rbd_cloned_image_name)
lib.log_or_assert(args, f"F.49 Source OpenStack VM cloned RBD image exists ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
## G1: flatten cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_flatten(args,
i_server_block_device_mapping['source']['ceph_pool_name'],
i_source_rbd_cloned_image_name)
lib.log_or_assert(args, f"F.50 Source OpenStack VM cloned RBD image flatten successfully ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",

            ## G1->G2: copy RBD image to target pool
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_copy(args,
                                                                  i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                  i_source_rbd_cloned_image_name,
                                                                  i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                  i_destination_server_rbd_image)
            lib.log_or_assert(args, "F.51 Source OpenStack VM RBD image copied G1 -> G2 successfully " \
                              f"({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name} -> {i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",
                              i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
                                                                    i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                    i_destination_server_rbd_image)
            lib.log_or_assert(args, f"F.52 Destination OpenStack VM RBD image exists ({i_server_block_device_mapping['destination']['ceph_pool_name']}/{i_destination_server_rbd_image})",
                              i_ecode == 0, locals())

            ## G1: delete cloned RBD image
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_delete(args,
                                                                    i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                    i_source_rbd_cloned_image_name)
            lib.log_or_assert(args, f"F.53 Source OpenStack VM RBD cloned image deletion succeeded ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
                              i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_exists(args,
                                                                    i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                    i_source_rbd_cloned_image_name)
            lib.log_or_assert(args, f"F.54 Source OpenStack VM cloned RBD image does not exist anymore ({i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_rbd_cloned_image_name})",
                              i_ecode != 0, locals())

            ## G1: remove created snapshot
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
                                                                             i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                             i_source_server_rbd_image,
                                                                             i_source_rbd_image_snapshot_name)
            lib.log_or_assert(args, "F.55 Source OpenStack VM RBD image snapshot still exists " \
                              f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name}",
                              i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_delete(args,
                                                                             i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                             i_source_server_rbd_image,
                                                                             i_source_rbd_image_snapshot_name)
            lib.log_or_assert(args, "F.56 Source OpenStack VM RBD image snapshot deletion succeeded " \
                              f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name}",
                              i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = lib.ceph_rbd_image_snapshot_exists(args,
                                                                             i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                             i_source_server_rbd_image,
                                                                             i_source_rbd_image_snapshot_name)
            lib.log_or_assert(args, "F.57 Source OpenStack VM RBD image snapshot does not exist anymore " \
                              f"{i_server_block_device_mapping['source']['ceph_pool_name']}/{i_source_server_rbd_image}@{i_source_rbd_image_snapshot_name}",
                              i_ecode != 0, locals())

        # start server in source cloud, wait for it to be back in 'ACTIVE' state
        if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status:
            if i_source_server_detail.status == 'ACTIVE':
                source_project_conn.compute.start_server(i_source_server_detail)
                lib.log_or_assert(args, "F.49 Source OpenStack VM server started back",
                                  lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'ACTIVE') == "ACTIVE",
                                  locals())

        # start server in destination cloud
        i_destination_server_flavor = destination_project_conn.compute.find_flavor(i_destination_server_flavor_name)
        i_destination_server_args = {'name': lib.get_migrated_resource_name(args, i_source_server_detail.name),
                                     'flavorRef': i_destination_server_flavor.id,
                                     'block_device_mapping_v2': [{'source_type': 'volume',
                                                                  'destination_type': 'volume',
                                                                  'uuid': i_server_block_device_mapping['destination']['volume_id'],
                                                                  'device_name': i_server_block_device_mapping['destination']['device_name'],
                                                                  'boot_index': 0 if i_server_block_device_mapping['destination']['volume_bootable'] else None}
                                                                 for i_server_block_device_mapping in i_server_block_device_mappings],
                                     'boot_volume': i_server_block_device_mappings[0]['destination']['volume_id'],
                                     'key_name': i_destination_server_keypair["name"],
                                     'networks': [{'uuid': i_network.id} for i_network in i_destination_server_networks]}
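        # For orientation, a single generated block_device_mapping_v2 entry is expected to look
        # roughly like this (hypothetical values):
        #   {'source_type': 'volume', 'destination_type': 'volume',
        #    'uuid': '<destination-volume-uuid>', 'device_name': 'vda', 'boot_index': 0}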
lib.log_or_assert(args, "F.60 Destination OpenStack server arguments are generated with valid block-device-mapping",
i_destination_server_args['block_device_mapping_v2'], locals())
lib.log_or_assert(args, "F.60 Destination OpenStack server arguments are generated with valid network configuration",
i_destination_server_args['networks'], locals())
#pprint.pprint(i_destination_server_args)
i_destination_server = destination_project_conn.compute.create_server(**i_destination_server_args)
lib.log_or_assert(args, "F.61 Destination OpenStack server is created", i_destination_server, locals())
i_destination_server = destination_project_conn.compute.wait_for_server(i_destination_server)
lib.log_or_assert(args, "F.62 Destination OpenStack server got ACTIVE",
i_destination_server.status == 'ACTIVE', locals())

        # add security groups to the destination server (if missing)
        for i_destination_server_security_group_id, i_destination_server_security_group_name in set([(i_destination_server_security_group.id, i_destination_server_security_group.name) for i_destination_server_security_group in i_destination_server_security_groups]):
            if {'name': i_destination_server_security_group_name} not in i_destination_server.security_groups:
                destination_project_conn.add_server_security_groups(i_destination_server.id, i_destination_server_security_group_id)

        i_destination_server_fip = destination_project_conn.network.create_ip(floating_network_id=destination_fip_network.id)
        lib.log_or_assert(args, "F.63 Destination OpenStack server FIP is created", i_destination_server_fip, locals())
        i_destination_server_port = lib.get_server_floating_ip_port(destination_project_conn, i_destination_server)
        lib.log_or_assert(args, "F.64 Destination OpenStack server FIP port is detected", i_destination_server_port, locals())
        destination_project_conn.network.add_ip_to_port(i_destination_server_port, i_destination_server_fip)

        args.logger.info(f"F.66 Source OpenStack server name:{i_source_server_detail.name} migrated into destination one name:{i_destination_server.name} id:{i_destination_server.id}")
# main() call (argument parsing)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    AP = argparse.ArgumentParser(epilog=globals().get('__doc__'),
                                 formatter_class=argparse.RawDescriptionHelpFormatter)

    AP.add_argument('--source-openrc', default=None, type=argparse.FileType('r'),
                    required=True, help='Source cloud authentication (OpenRC file)')
    AP.add_argument('--destination-openrc', default=None, type=argparse.FileType('r'),
                    required=True, help='Destination cloud authentication (OpenRC file)')
    AP.add_argument('--ceph-migrator-host', default='controller-ostack.stage.cloud.muni.cz',
                    help='OpenStack migrator ceph node host')
    AP.add_argument('--ceph-migrator-user', default='root',
                    help='OpenStack migrator ceph node username')
    AP.add_argument('--ceph-migrator-sshkeyfile', default=None, type=argparse.FileType('r'),
                    help='OpenStack migrator SSH keyfile')
    AP.add_argument('--source-ceph-cinder-pool-name', default='prod-cinder-volumes',
                    help='Source OpenStack/ceph cloud Cinder pool name')
    AP.add_argument('--source-ceph-ephemeral-pool-name', default='prod-ephemeral-vms',
                    help='Source OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name')
    AP.add_argument('--destination-ceph-cinder-pool-name', default='cloud-cinder-volumes-prod-brno',
                    help='Destination OpenStack/ceph cloud Cinder pool name')
    AP.add_argument('--destination-ceph-ephemeral-pool-name', default='cloud-ephemeral-volumes-prod-brno',
                    help='Destination OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name')
    AP.add_argument('--source-keypair-xml-dump-file', default='/root/migrator/prod-nova_api_key_pairs.dump.xml',
                    help='Source OpenStack cloud keypair SQL/XML dump file name')
    AP.add_argument('--destination-bootable-volume-image-name', default='cirros-0-x86_64',
                    help='Destination cloud bootable volumes are created on top of a public image. Name of the destination cloud image.')
    AP.add_argument('--destination-ipv4-external-network', default='external-ipv4-general-public',
                    help='Destination cloud IPv4 external network.')
    AP.add_argument('--destination-entity-prefix', default='migrated-',
                    help='Name prefix of migrated cloud entities in the destination cloud.')
    AP.add_argument('--destination-group-project-network-name', default='group-project-network',
                    help='Use pre-created network for group project entities (created by cloud-entities), this is preferred. ' \
                         'Set to "" to create a new router/subnet/network; this part is not yet implemented.')
    AP.add_argument('--project-name', default=None, required=True,
                    help='OpenStack project name (identical name in both clouds required)')
    AP.add_argument('--explicit-server-names', default=None, required=False,
                    help='(Optional) List of explicit server names or IDs to be migrated. Delimiter comma or space.')
    AP.add_argument('--explicit-volume-names', default=None, required=False,
                    help='(Optional) List of explicit volume names or IDs to be migrated. Delimiter comma or space.')
    AP.add_argument('--migrate-also-inactive-servers', default=False, required=False, action='store_true',
                    help='(Optional) Migrate also inactive servers (i.e. PAUSED/SHUTOFF).')
    AP.add_argument('--validation-a-source-server-id', default=None, required=True,
                    help='For validation, any server ID from the source OpenStack project')
    AP.add_argument('--exception-trace-file', default="project-migrator.dump",
                    required=False,
                    help='Exception / assert dump state file')

    logging.basicConfig(level=logging.INFO,  # set the logging level
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    ARGS = AP.parse_args()
    ARGS.logger = logging.getLogger("project-migrator")
    ARGS.explicit_server_names = lib.normalize_servers(ARGS.explicit_server_names)
    sys.exit(main(ARGS))