#!/usr/bin/env python3 """ OpenStack project multi-cloud migrator Tool performs OpenStack workflow migration from single OpenStack cloud to another one. Tool expects same block storage connected to both clouds to be able to perform storage transfer quickly. Block storage is transferred using external ceph migrator server node using ceph low-level commands. Ceph migrator server node is allowed to perform ceph operations (ceph storage access is blocked outside OpenStack servers) and also provides enough disk space for object storage migration. TODO: Object storage migration Tool relies on main libraries: * openstacksdk for OpenStack management * paramiko for low-level ceph storage migration (--ceph-migrator-host) Usage example: * Migrate all running virtual servers from source OpenStack ~/c/prod-einfra_cz_migrator.sh.inc project meta-cloud-new-openstack into destination one defined by OpenRC ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc, validate user's request by validating server existence with ID server-id-xyz in spource project $ ./project-migrator.py --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc --project-name meta-cloud-new-openstack --validation-a-source-server-id server-id-xyz --ceph-migrator-sshkeyfile ~/.ssh/id_rsa.g1-g2-ostack-cloud-migration """ import argparse import logging import math import os import os.path import pprint import sys import lib import clib import olib def main(args): """ main project migration loop """ # connect to source cloud source_migrator_openrc = lib.get_openrc(args.source_openrc) source_migrator_conn = lib.get_ostack_connection(source_migrator_openrc) args.logger.info("A.1 Source OpenStack cloud connected as migrator user") # connect to destination cloud destination_migrator_openrc = lib.get_openrc(args.destination_openrc) destination_migrator_conn = lib.get_ostack_connection(destination_migrator_openrc) args.logger.info("A.2 Destination OpenStack cloud connected as migrator user") # check project exists in source and destination source_project_name, destination_project_name = lib.get_ostack_project_names(args.project_name) source_project = lib.get_ostack_project(source_migrator_conn, source_project_name) lib.log_or_assert(args, "B.1 Source OpenStack cloud project exists", source_project) source_project_type = lib.get_ostack_project_type(source_migrator_conn, source_project) lib.log_or_assert(args, f"B.2 Source OpenStack cloud project type is {source_project_type}", source_project_type) destination_project = lib.get_ostack_project(destination_migrator_conn, destination_project_name) lib.log_or_assert(args, "B.10 Destination OpenStack cloud project exists", destination_project) destination_project_type = lib.get_ostack_project_type(destination_migrator_conn, destination_project) lib.log_or_assert(args, f"B.11 Destination OpenStack cloud project type is {destination_project_type}", destination_project_type) lib.log_or_assert(args, "B.12 Source and destination project types match", source_project_type == destination_project_type) if destination_project_type == 'group' and lib.executed_in_ci(): lib.log_or_assert(args, "B.13 Cloud group project migration is executed by authorized person (cloud/openstack team member).", lib.executed_as_admin_user_in_ci()) # check user context switching & quotas source_project_conn = lib.get_ostack_connection(source_migrator_openrc | {'OS_PROJECT_NAME': source_project.name}) #source_project_quotas = source_project_conn.get_compute_quotas(source_project.id) #lib.log_or_assert(args, f"C.1 Context switching to source OpenStack cloud project succeeded (id:{source_project.id})", # source_project_quotas and source_project_quotas.id == source_project.id) destination_project_conn = lib.get_ostack_connection(destination_migrator_openrc | {'OS_PROJECT_NAME': destination_project.name}) #destination_project_quotas = destination_project_conn.get_compute_quotas(destination_project.id) #lib.log_or_assert(args, f"C.2 Context switching to destination OpenStack cloud project succeeded (id:{destination_project.id})", # destination_project_quotas and destination_project_quotas.id == destination_project.id) # connect to migrator node reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user, args.ceph_migrator_sshkeyfile.name, 'uname -a') lib.log_or_assert(args, "D.1 Migrator host is reachable", 'Linux' in reply_stdout and reply_ecode == 0) reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user, args.ceph_migrator_sshkeyfile.name, '/root/migrator/ceph-accessible.sh') lib.log_or_assert(args, "D.2 Ceph is available from the migrator host", reply_ecode == 0) source_rbd_images = {args.source_ceph_ephemeral_pool_name: None, args.source_ceph_cinder_pool_name: None} for i_pool_name in source_rbd_images.keys(): source_rbd_images[i_pool_name] = clib.ceph_rbd_images_list(args, i_pool_name) lib.log_or_assert(args, f"D.3 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name]) source_keypairs = lib.get_source_keypairs(args) lib.log_or_assert(args, "D.4 Source OpenStack cloud keypairs received.", source_keypairs) # get source/destination entities in the project source_project_servers = lib.get_ostack_project_servers(source_project_conn, source_project) args.logger.info("E.1 Source OpenStack cloud servers received") lib.assert_entity_ownership(source_project_servers, source_project) args.logger.info(f"E.2 Source OpenStack cloud project has {len(source_project_servers)} servers.") source_project_flavors = lib.get_ostack_project_flavors(source_project_conn) lib.log_or_assert(args, "E.4 Source OpenStack flavor list received", source_project_flavors) destination_project_servers = lib.get_ostack_project_servers(destination_project_conn, destination_project) args.logger.info("E.10 Destination OpenStack cloud servers received") lib.assert_entity_ownership(destination_project_servers, destination_project) args.logger.info(f"E.11 Destination OpenStack cloud project has {len(destination_project_servers)} servers.") destination_project_flavors = lib.get_ostack_project_flavors(destination_project_conn) lib.log_or_assert(args, "E.12 Destination OpenStack flavor list received", destination_project_flavors) lib.log_or_assert(args, "E.20 Source OpenStack VM ID validation succeeded", args.validation_a_source_server_id in [i_server.id for i_server in source_project_servers]) destination_image = destination_project_conn.image.find_image(args.destination_bootable_volume_image_name) lib.log_or_assert(args, "E.30 Destination image found and received", destination_image) destination_fip_network = destination_project_conn.network.find_network(args.destination_ipv4_external_network) lib.log_or_assert(args, "E.31 Destination cloud FIP network detected", destination_fip_network) lib.duplicate_ostack_project_security_groups(args, source_project_conn, destination_project_conn, source_project, destination_project) args.logger.info("E.40 Destination OpenStack project security groups duplicated") args.logger.info("F.0 Main looping started") args.logger.info(f"F.0 Source VM servers: {[ i_source_server.name for i_source_server in source_project_servers]}") for i_source_server in source_project_servers: i_source_server_detail = source_project_conn.compute.find_server(i_source_server.id) i_source_server_has_fip = lib.server_detect_floating_address(i_source_server_detail) if args.explicit_server_names and i_source_server.name not in args.explicit_server_names: args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to --explicit-server-names={args.explicit_server_names}") continue if i_source_server_detail.status != 'ACTIVE': args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to VM status {i_source_server_detail.status}. Use --migrate-also-inactive-servers if necessary.") continue # detect destination VM does not exist i_destination_server_detail = destination_project_conn.compute.find_server(lib.get_dst_resource_name(args, i_source_server_detail.name)) if i_destination_server_detail: args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} as equivalent VM exists in destination cloud (name: {i_destination_server_detail.name})") continue args.logger.info(f"F.1 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, " \ f"keypair: {i_source_server_detail.key_name}, flavor: {i_source_server_detail.flavor}, " \ f"sec-groups:{i_source_server_detail.security_groups}, root_device_name: {i_source_server_detail.root_device_name}, " \ f"block_device_mapping: {i_source_server_detail.block_device_mapping}, " \ f"attached-volumes: {i_source_server_detail.attached_volumes}" \ f"addresses: {i_source_server_detail.addresses}") # network/subnet/router detection & creation i_destination_server_network_addresses = \ olib.get_or_create_dst_server_networking(args, source_project_conn, destination_project_conn, source_project, destination_project, i_source_server_detail) # flavor detection i_source_server_flavor_name = i_source_server_detail.flavor.name i_destination_server_flavor_name = lib.get_destination_flavor(i_source_server_flavor_name) lib.log_or_assert(args, f"F.5 Source to Destination flavor mapping succeeeded ({i_source_server_flavor_name}->{i_destination_server_flavor_name})", i_destination_server_flavor_name) lib.log_or_assert(args, "F.6 Destination OpenStack flavor exists", [ i_flavor for i_flavor in destination_project_flavors if i_flavor.name == i_destination_server_flavor_name ]) # keypair detection / creation i_source_server_keypair = lib.get_source_keypair(source_keypairs, i_source_server_detail.key_name, i_source_server_detail.user_id) lib.log_or_assert(args, f"F.7 Source OpenStack server keypair found ({i_source_server_keypair['name']})", i_source_server_keypair) i_destination_server_keypair = None if i_destination_server_keypairs := [i_keypair for i_keypair in destination_project_conn.list_keypairs() if i_keypair.name == lib.get_dst_resource_name(args, i_source_server_detail.key_name)]: i_destination_server_keypair = i_destination_server_keypairs[0] lib.log_or_assert(args, f"F.8 Destination OpenStack server keypair found already ({i_destination_server_keypair.name})", i_destination_server_keypair) else: i_destination_server_keypair = lib.create_keypair(args, destination_project_conn, i_source_server_keypair) args.logger.info("F.8 Destination OpenStack server keypair created") lib.log_or_assert(args, f"F.9 Destination OpenStack server keypair exists ({i_destination_server_keypair.name})", i_destination_server_keypair) # server security group i_destination_server_security_groups=[] for i_source_server_security_group_name in {i_sg['name'] for i_sg in i_source_server_detail.security_groups}: i_source_server_security_group = source_project_conn.network.find_security_group(i_source_server_security_group_name, project_id=source_project.id) i_destination_server_security_group = None if i_destination_server_security_group := destination_project_conn.network.find_security_group(lib.get_dst_resource_name(args, i_source_server_security_group.name), project_id=destination_project.id): lib.log_or_assert(args, f"F.10 Destination OpenStack server security group found already ({i_destination_server_security_group.name})", i_destination_server_security_group) else: args.logger.info("F.10 Destination OpenStack server matching security group not found and gets created.") i_destination_server_security_group = lib.create_security_groups(args, source_project_conn, destination_project_conn, i_source_server_security_group, destination_project) lib.log_or_assert(args, f"F.10 Destination OpenStack server security group created ({i_destination_server_security_group.name})", i_destination_server_security_group) lib.log_or_assert(args, f"F.11 Destination OpenStack server security group exists ({i_destination_server_security_group.name})", i_destination_server_security_group) i_destination_server_security_groups.append(i_destination_server_security_group) lib.log_or_assert(args, "F.12 Destination OpenStack server - destination security groups exists", i_destination_server_security_groups) # volume detection i_server_block_device_mappings = [ ] # schema: [ {}, ... ] # where {} is following dict # { 'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image', 'volume_attachment_id': <>, 'volume_id': <>, # 'ceph_pool_name': <pool-name>, 'ceph_rbd_image_name': <rbd-image-name>, 'ceph_rbd_image_size': <size-gb>} # OR # {'block_storage_type': 'ceph-rbd-image', 'ceph_pool_name': <pool-name>, 'ceph_rbd_image_name': <rbd-image-name>, 'ceph_rbd_image_size': <size-gb> } ] # 'destination': {'volume_size': <size-gb>, 'volume_id': <vol-id>, 'device_name': <dev-name>, 'volume_bootable': True/False} # } i_source_server_root_device_name = i_source_server_detail.root_device_name lib.log_or_assert(args, f"F.20 Source OpenStack server - root device name received ({i_source_server_root_device_name})", i_source_server_root_device_name) i_source_server_volume_attachments = tuple(source_project_conn.compute.volume_attachments(i_source_server_detail.id)) assert_msg = f"F.21 Source OpenStack server - volume attachments received {i_source_server_volume_attachments}" #pprint.pprint(i_source_server_volume_attachments) i_source_ceph_ephemeral_rbd_image = None if i_source_server_root_device_name in [ i_source_server_attachment.device for i_source_server_attachment in i_source_server_volume_attachments ]: args.logger.info("F.22 Source OpenStack server - one of attached volume is attached as the root partition") # populate i_server_block_device_mappings for i_source_server_volume_attachment in i_source_server_volume_attachments: i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id) i_server_block_device_mappings.append(lib.get_server_block_device_mapping(args, i_source_server_volume_attachment, i_server_volume, i_source_server_root_device_name)) else: args.logger.info("F.22 Source OpenStack server - none of attached volumes is attached as the root partition. Seeking for root partition RBD image") if f"{i_source_server_detail.id}_disk" in source_rbd_images[args.source_ceph_ephemeral_pool_name]: i_source_ceph_ephemeral_rbd_image = f"{i_source_server_detail.id}_disk" args.logger.info(f"F.23 Source OpenStack server - Root partition found as RBD image {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}") # get rbd image info / size i_source_ceph_ephemeral_rbd_image_data, _, _ = clib.ceph_rbd_image_info(args, args.source_ceph_ephemeral_pool_name, i_source_ceph_ephemeral_rbd_image) lib.log_or_assert(args, f"F.24 Source OpenStack ceph RBD image proper information received {i_source_ceph_ephemeral_rbd_image_data}", i_source_ceph_ephemeral_rbd_image_data and 'size' in i_source_ceph_ephemeral_rbd_image_data) i_source_ceph_ephemeral_rbd_image_size = math.ceil(i_source_ceph_ephemeral_rbd_image_data['size'] / 1024 / 1024 / 1024) lib.log_or_assert(args, f"F.25 Source OpenStack ceph RBD image size calculated ({i_source_ceph_ephemeral_rbd_image_size})", i_source_ceph_ephemeral_rbd_image_size) # populate i_server_block_device_mappings ## initial disk i_server_block_device_mappings.append({'source': {'block_storage_type': 'ceph-rbd-image', 'volume_id': i_source_ceph_ephemeral_rbd_image, 'ceph_pool_name': args.source_ceph_ephemeral_pool_name, 'ceph_rbd_image_name': i_source_ceph_ephemeral_rbd_image, 'ceph_rbd_image_size': i_source_ceph_ephemeral_rbd_image_size}, 'destination': {'volume_size': i_source_ceph_ephemeral_rbd_image_size, 'volume_name': lib.get_dst_resource_name(args, i_source_ceph_ephemeral_rbd_image), 'volume_description': f"RBD {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}", 'volume_id': None, 'ceph_pool_name': args.destination_ceph_cinder_pool_name, 'device_name': os.path.basename(i_source_server_root_device_name), 'volume_bootable': True}}) ## other disks attached to VM for i_source_server_volume_attachment in i_source_server_volume_attachments: i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id) i_server_block_device_mappings.append(lib.get_server_block_device_mapping(args, i_source_server_volume_attachment, i_server_volume, i_source_server_root_device_name)) lib.log_or_assert(args, "F.26 Source OpenStack server - root partition detected", i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['source']) lib.log_or_assert(args, "F.27 Destination OpenStack server - root partition details generated", i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['destination']) # volume creation in destination cloud for i_destination_server_block_device_mapping in i_server_block_device_mappings: i_new_volume_args = {'name': i_destination_server_block_device_mapping['destination']['volume_name'], 'size': i_destination_server_block_device_mapping['destination']['volume_size'], 'description': lib.get_dst_resource_desc(args, i_destination_server_block_device_mapping['destination']['volume_description'], i_destination_server_block_device_mapping['source']['volume_id'])} # TODO: this seems to be the only way how to create bootable volume using openstacksdk, check again if i_destination_server_block_device_mapping['destination']['volume_bootable']: i_new_volume_args['imageRef'] = destination_image.id i_new_volume = destination_project_conn.block_storage.create_volume(**i_new_volume_args) lib.log_or_assert(args, f"F.29 Destination OpenStack volume created (name:{i_new_volume.name}, id:{i_new_volume.id})", i_new_volume) lib.wait_for_ostack_volume_status(destination_project_conn, i_new_volume.id, 'available') lib.log_or_assert(args, f"F.30 Destination OpenStack volume available (name:{i_new_volume.name}, id:{i_new_volume.id})", lib.wait_for_ostack_volume_status(destination_project_conn, i_new_volume.id, 'available') == 'available') # remember volume ID i_destination_server_block_device_mapping['destination']['volume_id'] = i_new_volume.id for i_destination_server_block_device_mapping in i_server_block_device_mappings: lib.log_or_assert(args, f"F.31 Destination OpenStack volume IDs properly stored (id:{i_destination_server_block_device_mapping['destination']['volume_id']})", i_destination_server_block_device_mapping['destination']['volume_id']) # VM stop, wait for SHUTOFF if i_source_server_detail.status != 'SHUTOFF': source_project_conn.compute.stop_server(i_source_server_detail) lib.log_or_assert(args, "F.33 Source OpenStack VM server stopped", lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'SHUTOFF') == "SHUTOFF") # volume migration (browse i_server_block_device_mappings) for i_server_block_device_mapping in i_server_block_device_mappings: clib.migrate_rbd_image(args, i_server_block_device_mapping) # start server in source cloud, wait for back being 'ACTIVE' if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status and \ not args.source_servers_left_shutoff: if i_source_server_detail.status == 'ACTIVE': source_project_conn.compute.start_server(i_source_server_detail) lib.log_or_assert(args, "F.34 Source OpenStack VM server started back", lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'ACTIVE') == "ACTIVE", locals()) # start server in destination cloud i_destination_server_flavor = destination_project_conn.compute.find_flavor(i_destination_server_flavor_name) # Note: argument network is not valid anymore, use networks i_destination_server_args = {'name': lib.get_dst_resource_name(args, i_source_server_detail.name), 'flavorRef': i_destination_server_flavor.id, 'block_device_mapping_v2': [ {'source_type': 'volume', 'destination_type': 'volume', 'uuid': i_server_block_device_mapping['destination']['volume_id'], 'device_name': i_server_block_device_mapping['destination']['device_name'], 'boot_index': 0 if i_server_block_device_mapping['destination']['volume_bootable'] else None} for i_server_block_device_mapping in i_server_block_device_mappings ], 'boot_volume': i_server_block_device_mappings[0]['destination']['volume_id'], 'key_name': i_destination_server_keypair["name"], 'networks': [ lib.describe_server_network_connection(args, destination_project_conn, i_netaddr) for i_netaddr in i_destination_server_network_addresses ]} lib.log_or_assert(args, "F.35 Destination OpenStack server arguments are generated with valid block-device-mapping", i_destination_server_args['block_device_mapping_v2'], locals()) lib.log_or_assert(args, "F.36 Destination OpenStack server arguments are generated with valid network configuration", i_destination_server_args['networks'], locals()) #pprint.pprint(i_destination_server_args) i_destination_server = destination_project_conn.compute.create_server(**i_destination_server_args) lib.log_or_assert(args, f"F.37 Destination OpenStack server (name:{i_destination_server.name}) is created", i_destination_server, locals()) i_destination_server = destination_project_conn.compute.wait_for_server(i_destination_server) lib.log_or_assert(args, f"F.38 Destination OpenStack server (name:{i_destination_server.name}) got ACTIVE", i_destination_server.status == 'ACTIVE', locals()) # add security groups to the destination server (if missing) for i_destination_server_security_group_id, i_destination_server_security_group_name in {(i_destination_server_security_group.id, i_destination_server_security_group.name) for i_destination_server_security_group in i_destination_server_security_groups}: if {'name': i_destination_server_security_group_name } not in i_destination_server.security_groups: destination_project_conn.add_server_security_groups(i_destination_server.id, i_destination_server_security_group_id) if i_source_server_has_fip: # add FIP as source VM has it i_destination_server_fip = destination_project_conn.network.create_ip(floating_network_id=destination_fip_network.id) lib.log_or_assert(args, f"F.39 Destination OpenStack server (name:{i_destination_server.name}) FIP is created ({i_destination_server_fip.floating_ip_address})", i_destination_server_fip, locals()) i_destination_server_port = lib.get_server_floating_ip_port(destination_project_conn, i_destination_server) lib.log_or_assert(args, f"F.40 Destination OpenStack server (name:{i_destination_server.name}) FIP port is detected", i_destination_server_port, locals()) destination_project_conn.network.add_ip_to_port(i_destination_server_port, i_destination_server_fip) args.logger.info(f"F.41 Source OpenStack server name:{i_source_server_detail.name} migrated into destination one name:{i_destination_server.name} id:{i_destination_server.id}") # EXPLICIT OpenStack volume migration if args.explicit_volume_names: for i_source_volume_name in args.explicit_volume_names: i_source_volume = source_project_conn.block_storage.find_volume(i_source_volume_name) if not i_source_volume: args.logger.info(f"H.1 Source volume migration skipped as does not exist (name:{i_source_volume_name})") continue if i_source_volume.status != 'available': args.logger.info(f"H.2 Source volume migration skipped as it is not in state available (name:{i_source_volume_name}, state:{i_source_volume.status}). " \ "Note in-use volumes are being migrated in VM server migration part.") continue i_dst_volume = destination_project_conn.block_storage.create_volume(name=lib.get_dst_resource_name(args, i_source_volume.name), size=i_source_volume.size, description=lib.get_dst_resource_desc(args, i_source_volume.description, i_source_volume.id)) lib.log_or_assert(args, f"H.3 Destination OpenStack volume created (name:{i_dst_volume.name}, id:{i_dst_volume.id})", i_dst_volume) i_dst_volume_status = lib.wait_for_ostack_volume_status(destination_project_conn, i_dst_volume.id, 'available') lib.log_or_assert(args, f"H.4 Destination OpenStack volume available (name:{i_dst_volume.name}, id:{i_dst_volume.id})", i_dst_volume_status == 'available') i_volume_mapping = {'source': {'ceph_pool_name': args.source_ceph_cinder_pool_name, 'ceph_rbd_image_name': i_source_volume.id}, 'destination': {'ceph_pool_name': args.destination_ceph_cinder_pool_name, 'volume_id': i_dst_volume.id}} clib.migrate_rbd_image(args, i_volume_mapping) i_dst_volume_detail = destination_project_conn.block_storage.find_volume(i_dst_volume.id) lib.log_or_assert(args, f"H.5 Destination OpenStack volume available (name:{i_dst_volume_detail.name}, id:{i_dst_volume_detail.id})", i_dst_volume_detail.status == 'available') # main() call (argument parsing) # --------------------------------------------------------------------------- if __name__ == "__main__": AP = argparse.ArgumentParser(epilog=globals().get('__doc__'), formatter_class=argparse.RawDescriptionHelpFormatter) AP.add_argument('--source-openrc', default=None, type=argparse.FileType('r'), required=True, help='Source cloud authentication (OpenRC file)') AP.add_argument('--destination-openrc', default=None, type=argparse.FileType('r'), required=True, help='Destination cloud authentication (OpenRC file)') AP.add_argument('--ceph-migrator-host', default='controller-ostack.stage.cloud.muni.cz', help='OpenStack migrator ceph node host') AP.add_argument('--ceph-migrator-user', default='root', help='OpenStack migrator ceph node username') AP.add_argument('--ceph-migrator-sshkeyfile', default=None, type=argparse.FileType('r'), help='OpenStack migrator SSH keyfile') AP.add_argument('--ceph-migrator-host-base-dir', default='/root/migrator', help='OpenStack ceph migrator base directory for scripts and operations on ceph mogrator host') AP.add_argument('--source-ceph-cinder-pool-name', default='prod-cinder-volumes', help='Source OpenStack/ceph cloud Cinder pool name') AP.add_argument('--source-ceph-ephemeral-pool-name', default='prod-ephemeral-vms', help='Source OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name') AP.add_argument('--destination-ceph-cinder-pool-name', default='cloud-cinder-volumes-prod-brno', help='Destination OpenStack/ceph cloud Cinder pool name') AP.add_argument('--destination-ceph-ephemeral-pool-name', default='cloud-ephemeral-volumes-prod-brno', help='Destination OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name') AP.add_argument('--source-keypair-xml-dump-file', default='/root/migrator/prod-nova_api_key_pairs.dump.xml', help='Source OpenStack cloud keypair SQL/XML dump file name (on ceph-migrator-host)') AP.add_argument('--source-servers-left-shutoff', default=False, required=False, action='store_true', help='Migrated source servers are left SHUTOFF (i.e. not started automatically).') AP.add_argument('--destination-bootable-volume-image-name', default='cirros-0-x86_64', help='Destination cloud bootable volumes are made on top of public image. Name of destination cloud image.') AP.add_argument('--destination-ipv4-external-network', default='external-ipv4-general-public', help='Destination cloud IPV4 external network.') AP.add_argument('--destination-entity-name-prefix', default='migrated-', help='Destination cloud entity name prefix.') AP.add_argument('--destination-entity-description-suffix', default=', migrated(id:{})', help='Destination cloud entity description suffix.') AP.add_argument('--project-name', default=None, required=True, help='OpenStack project name (identical name in both clouds required)') AP.add_argument('--explicit-server-names', default=None, required=False, help='(Optional) List of explicit server names or IDs to be migrated. Delimiter comma or space.') AP.add_argument('--explicit-volume-names', default=None, required=False, help='(Optional) List of explicit volume (names or) IDs to be migrated. Delimiter comma or space.') AP.add_argument('--migrate-also-inactive-servers', default=False, required=False, action='store_true', help='(Optional) Migrate also inactive servers (i.e. PAUSED/SHUTOFF).') AP.add_argument('--validation-a-source-server-id', default=None, required=True, help='For validation any server ID from source OpenStack project') AP.add_argument('--exception-trace-file', default="project-migrator.dump", required=False, help='Exception / assert dump state file') AP.add_argument('--log-level', default="INFO", required=False, choices=[i_lvl for i_lvl in dir(logging) if i_lvl.isupper() and i_lvl.isalpha()], help='Executio log level (python logging)') AP.add_argument('--debugging', default=False, required=False, action='store_true', help='(Optional) Enter custom development debugging mode.') ARGS = AP.parse_args() ARGS.logger = logging.getLogger("project-migrator") ARGS.explicit_server_names = lib.get_resource_names_ids(ARGS.explicit_server_names) ARGS.explicit_volume_names = lib.get_resource_names_ids(ARGS.explicit_volume_names) logging.basicConfig(level=getattr(logging, ARGS.log_level), format='%(asctime)s %(name)s %(levelname)s %(message)s') if ARGS.debugging: import IPython #IPython.embed() sys.exit(main(ARGS))