""" OpenStack project migrator library """

import copy
import json
import os
import pprint
import re
import time

import xmltodict
import paramiko
import openstack
from keystoneauth1.identity import v3
from keystoneauth1 import session

def wait_for_keypress(msg="Press Enter to continue..."):
    """ interactively wait until the user confirms with Enter """
    return input(msg)

def normalize_servers(servers):
    """ list of server names/IDs separated by space of comma returned as list of strings or None """
    if isinstance(servers, str) and servers:
        return servers.replace(","," ").split()
    return None
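
# Illustrative examples (hypothetical values) of the normalization above:
#   normalize_servers("vm1,vm2 vm3")  ->  ['vm1', 'vm2', 'vm3']
#   normalize_servers("")             ->  None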

def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
    """ transform input dictionary and filter its keys with allowed_keys and denied_keys sequences """
    int_allowed_keys = allowed_keys if allowed_keys else tuple()
    int_denied_keys = denied_keys if denied_keys else tuple()
    if int_allowed_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key in int_allowed_keys}
    if int_denied_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key not in int_denied_keys}
    return dict_data
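
# Illustrative examples (hypothetical dict) of the key filtering above; allowed_keys
# takes precedence when both are given:
#   trim_dict({'a': 1, 'b': 2}, allowed_keys=('a',))  ->  {'a': 1}
#   trim_dict({'a': 1, 'b': 2}, denied_keys=('a',))   ->  {'b': 2}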

def executed_as_admin_user_in_ci():
    """ identity the script user within CI pipeline """
    return os.environ.get('GITLAB_USER_LOGIN') in ('246254', '252651', 'Jan.Krystof', 'moravcova', '469240', 'Josef.Nemec', '247801')

def executed_in_ci():
    """ detect CI environment """
    envvar_names = ('CI_JOB_NAME', 'CI_REPOSITORY_URL', 'GITLAB_USER_LOGIN')
    return all(i_envvar_name in os.environ for i_envvar_name in envvar_names)

def get_ostack_project_names(project_name):
    """ get source and destination ostack project names """
    if '->' in project_name:
        return project_name.split('->', 1)
    return project_name, project_name
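
# Illustrative examples (hypothetical project names); '->' requests a cross-project migration:
#   get_ostack_project_names("proj-a->proj-b")  ->  ['proj-a', 'proj-b']
#   get_ostack_project_names("proj-a")          ->  ('proj-a', 'proj-a')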

def get_destination_network(source_network):
    """ LUT for networks """
    network_mapping = {
        # shared
        "78-128-250-pers-proj-net" :  "internal-ipv4-general-private",
        "147-251-115-pers-proj-net" : "internal-ipv4-general-private",
        "public-muni-v6-432" :        "external-ipv6-general-public",
        # external
        "public-muni-147-251-21-GROUP": "external-ipv4-general-public",
        "public-cesnet-78-128-250-PERSONAL": "external-ipv4-general-public",
        "public-cesnet-78-128-251-GROUP": "external-ipv4-general-public",
        "provider-public-cerit-sc-147-251-253": "external-ipv4-general-public",
        "public-muni-147-251-115-PERSONAL": "external-ipv4-general-public",
        "public-muni-147-251-124-GROUP": "external-ipv4-general-public",
        "public-cesnet-195-113-167-GROUP": "external-ipv4-general-public",
        "public-muni-147-251-11-128-254": "external-ipv4-general-public",
        "public-muni-CERIT-FI-147-251-88-132-254": "external-ipv4-general-public",
        "public-muni-CSIRT-MU-217-69-96-64-240": "external-ipv4-general-public",
        "public-muni-csirt-147-251-125-16-31": "external-ipv4-general-public",
        "provider-public-cerit-sc-147-251-254": "external-ipv4-general-public",
        # group project internal network
        "group-project-network": "group-project-network"
        }
    return network_mapping.get(source_network)

def get_destination_subnet(source_subnet):
    """ LUT for subnets """
    subnet_mapping = {
        # TODO: shared

        # group project internal network
        "group-project-network-subnet": "group-project-network-subnet"
        }
    return subnet_mapping.get(source_subnet)

def get_destination_router(source_router):
    """ LUT for routers """
    router_mapping = {
        # TODO: shared

        # group project internal network
        "router": "group-project-router"
        }
    return router_mapping.get(source_router)


def get_destination_flavor(source_flavor):
    """ LUT for flavors """
    flavor_mapping = {
        #'eph.16cores-60ram' # no need to handle, not used by any project in G1
        #'eph.8cores-30ram': 'c2.8core-30ram' # no need to handle, not used by any project in G1
        #'eph.8cores-60ram': 'c3.8core-60ram' # no need to handle, not used by any project in G1
        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram', # disk size mismatch, G2 has 80 instead of 35
        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram', # ok
        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram', # disk size mismatch, G2 has 80 instead of 35
        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram', # ok
        #'hdn.medium-ssd-ephem': # no need to handle, not used by any project in G1
        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500', # missing in G2 and needed
        'hpc.16core-128ram': 'c3.16core-128ram', # missing in G2 and needed
        'hpc.16core-256ram': 'c3.16core-256ram', # missing in G2 and needed
        'hpc.16core-32ram': 'c2.16core-30ram', # ok
        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk', # missing in G2 and needed
        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd', # missing in G2 and needed
        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram', # ok
        'hpc.18core-48ram': '', # missing in G2 and needed
        'hpc.18core-64ram-dukan': 'c2.24core-60ram', # no need to handle
        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd', # no need to handle
        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500', # missing in G2 and needed
        'hpc.30core-256ram': 'c3.30core-256ram', # missing in G2 and needed
        'hpc.30core-64ram': 'c3.32core-60ram', # G2 has 2 more CPUs
        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram', # ok
        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # ok
        'hpc.4core-4ram': 'e1.medium', # no need to handle
        'hpc.8core-128ram': 'c3.8core-128ram', # missing in G2 and needed
        'hpc.8core-16ram': 'c2.8core-16ram', # ok
        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram', # no need to handle
        'hpc.8core-256ram': None, # no need to handle
        'hpc.8core-32ram-dukan': 'c2.8core-30ram', # no need to handle
        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram', # ok
        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram', # ok
        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram', # ok
        'hpc.8core-8ram': 'e1.1xlarge', # G2 disk is 20 GB smaller
        'hpc.hdh-ephem': 'hpc.hdh', # missing and needed
        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500', # not needed
        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # not needed
        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # not needed
        'hpc.large': 'g2.3xlarge', # ok
        'hpc.medium': 'c2.8core-30ram', # ok
        'hpc.small': 'c2.4core-16ram', # ok
        'hpc.xlarge': None, # missing in G2
        'hpc.xlarge-memory': 'c3.xlarge-memory', # missing in G2
        'standard.16core-32ram': 'g2.2xlarge', # ok
        'standard.20core-128ram': 'e1.20core-128ram', # not needed
        'standard.20core-256ram': 'e1.20core-256ram', # missing in G2
        'standard.2core-16ram': 'c3.2core-16ram', # ok
        'standard.large': 'e1.large', # ok, alternatively c3.4core-8ram
        'standard.medium': 'e1.medium', # 2 more CPUs
        'standard.memory': 'c3.2core-30ram', # alternatively also c2.2core-30ram
        'standard.one-to-many': 'c3.24core-60ram', # G2 has 4 more CPUs
        'standard.small': 'e1.small', # 2x more RAM and CPUs in G2
        'standard.tiny': 'e1.tiny', # 2x more RAM and CPUs in G2
        'standard.xlarge': 'e1.2xlarge', # 4 more CPUs in G2
        'standard.xlarge-cpu': 'e1.2xlarge', # ok
        'standard.xxlarge': 'c2.8core-30ram', # ok
        'standard.xxxlarge': 'c3.8core-60ram'  # ok
    }
    assert source_flavor in flavor_mapping, f"Source flavor ({source_flavor}) can be mapped to a destination one"
    assert flavor_mapping[source_flavor], f"Source flavor ({source_flavor}) maps to a valid destination flavor"
    return flavor_mapping[source_flavor]

def normalize_table_data_field(data_field):
    """ normalize single data field (single data insert) """
    int_dict = {}
    i_name_key = '@name'
    for i_data_field_item in data_field:
        i_value_key = [ i_k for i_k in i_data_field_item.keys() if i_k != i_name_key][0]
        int_dict[i_data_field_item[i_name_key]] = i_data_field_item[i_value_key]
    return int_dict

def normalize_table_data(data):
    """ normalize whole table data """
    int_list = []
    for i_data_field in data:
        int_list.append(normalize_table_data_field(i_data_field['field']))
    return int_list
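
# Minimal sketch of the expected input shape (hypothetical values), as produced by xmltodict
# from a mysqldump XML where each <field name="..."> carries one value key such as '#text':
#   normalize_table_data([{'field': [{'@name': 'name', '#text': 'mykey'},
#                                    {'@name': 'user_id', '#text': '123'}]}])
#   ->  [{'name': 'mykey', 'user_id': '123'}]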

def get_migrated_resource_name(args, name):
    """ translate original name to destination one """
    return f"{args.destination_entity_prefix}{name}"

def get_openrc(file_handle):
    """ parse and return OpenRC file """
    openrc_vars = {}

    for line in file_handle:
        match = re.match(r'^export (\w+)=(.+)$', line.strip())
        if match:
            openrc_vars[match.group(1)] = match.group(2).strip('"')
    return openrc_vars
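
# Illustrative example (hypothetical OpenRC line); any iterable of lines works:
#   get_openrc(['export OS_AUTH_URL="https://identity.example.org/v3"'])
#   ->  {'OS_AUTH_URL': 'https://identity.example.org/v3'}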


def get_ostack_connection(openrc_vars):
    """ build an OpenStack SDK connection from parsed OpenRC variables """
    auth_args = {
        'auth_url': openrc_vars.get('OS_AUTH_URL'),
        'username': openrc_vars.get('OS_USERNAME'),
        'password': openrc_vars.get('OS_PASSWORD'),
        'project_name': openrc_vars.get('OS_PROJECT_NAME'),
        'project_domain_name': openrc_vars.get('OS_PROJECT_DOMAIN_NAME'),
        'user_domain_name': openrc_vars.get('OS_USER_DOMAIN_NAME'),
        'project_domain_id': openrc_vars.get('OS_PROJECT_DOMAIN_ID'),
        'user_domain_id': openrc_vars.get('OS_USER_DOMAIN_ID'),
    }
    connection_args = {
        'compute_api_version': openrc_vars.get('OS_COMPUTE_API_VERSION'),
        'identity_api_version': openrc_vars.get('OS_IDENTITY_API_VERSION'),
        'volume_api_version': openrc_vars.get('OS_VOLUME_API_VERSION')
    }
    auth = v3.Password(**auth_args)
    ostack_sess = session.Session(auth=auth)
    ostack_conn = openstack.connection.Connection(session=ostack_sess, **connection_args)
    return ostack_conn

def get_ostack_project(ostack_connection, project_name):
    """ find an OpenStack project by name, return it or None """
    project = None
    for i_project in ostack_connection.list_projects():
        if i_project.name == project_name:
            project = i_project
    return project

def get_ostack_project_type(ostack_connection, project):
    """ detect project type, return 'personal' (project named after an existing user) or 'group' """
    if project.name in [ i_user.name for i_user in ostack_connection.list_users() ]:
        return "personal"
    return "group"

def get_ostack_project_security_groups(ostack_connection, project=None):
    """ get security groups, optionally filtered to a single project """
    security_groups = []
    if project:
        for i_security_group in ostack_connection.network.security_groups():
            if i_security_group.tenant_id == project.id:
                security_groups.append(i_security_group)
        return security_groups
    return tuple(ostack_connection.network.security_groups())

def get_ostack_project_keypairs(ostack_connection, project=None):
    """ get keypairs via the cloud layer """
    return ostack_connection.list_keypairs()

def get_ostack_project_keypairs2(ostack_connection, project=None):
    """ get keypairs via the compute API proxy layer """
    return list(ostack_connection.compute.keypairs())


def get_ostack_project_servers(ostack_connection, project=None):
    """ get servers of the connection's current project """
    return tuple(ostack_connection.compute.servers())

def get_ostack_project_volumes(ostack_connection, project=None):
    """ get volumes of the connection's current project """
    return ostack_connection.block_store.volumes()

def get_ostack_project_flavors(ostack_connection, project=None):
    """ get flavors visible to the connection's current project """
    return tuple(ostack_connection.compute.flavors())

def get_resource_details(resources):
    """ inspect resources """
    for i_resource in resources:
        print(i_resource)
        pprint.pprint(i_resource)

def remote_cmd_exec(hostname, username, key_filename, command):
    """ execute a remote command, return stdout, stderr and exit code, or the Exception on failure """
    # Create SSH client
    ssh_client = paramiko.SSHClient()
    # Automatically add untrusted hosts
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ecode = None

    try:
        # Connect to the remote host
        pkey = paramiko.RSAKey.from_private_key_file(key_filename)
        ssh_client.connect(hostname, username=username, pkey=pkey, look_for_keys=False)

        # Execute the command, read the output and close
        stdin, stdout, stderr = ssh_client.exec_command(command)
        output = stdout.read().decode().strip()
        error = stderr.read().decode().strip()
        ecode = stdout.channel.recv_exit_status()
        ssh_client.close()

        return output, error, ecode

    except Exception as e:
        print("Error:", e)
        return None, None, e
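
# Illustrative usage (hypothetical host and key file):
#   stdout, stderr, ecode = remote_cmd_exec('migrator.example.org', 'root',
#                                           '/path/to/id_rsa', 'hostname')
#   if ecode == 0:
#       print(stdout)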

def get_ceph_client_name(args, ceph_src_pool_name, ceph_dst_pool_name=None):
    """ select ceph client: client.cinder for source cinder/ephemeral pools, client.migrator otherwise """
    int_pool_name = ceph_dst_pool_name if ceph_dst_pool_name else ceph_src_pool_name

    return "client.cinder" if int_pool_name in (args.source_ceph_cinder_pool_name, args.source_ceph_ephemeral_pool_name,) else "client.migrator"

def ceph_rbd_images_list(args, pool_name):
    """ list RBD images in a ceph pool """
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"/root/migrator/ceph-rbd-images-list.sh {pool_name}")
    assert stdout, f"RBD pool ({pool_name}) images received successfully (non-empty RBD list)"
    assert ecode == 0, f"RBD pool ({pool_name}) images received successfully (ecode)"
    return stdout.splitlines()

def ceph_rbd_image_info(args, pool_name, rbd_image_name):
    """ get ceph RBD image information """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-info.sh {pool_name} {rbd_image_name}")
    return json.loads(stdout), stderr, ecode



def ceph_rbd_image_exists(args, pool_name, rbd_image_name):
    """ detect whether RBD image {pool_name}/{rbd_image_name} exists """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-exists.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_delete(args, pool_name, rbd_image_name):
    """ delete RBD image {pool_name}/{rbd_image_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-delete.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_flatten(args, pool_name, rbd_image_name):
    """ flatten RBD image {pool_name}/{rbd_image_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-flatten.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_snapshot_name,
                         dst_pool_name, dst_rbd_image_name):
    """ clone RBD image {src_pool_name}/{src_rbd_image_name}@{src_rbd_image_snapshot_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
    ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-clone.sh {src_pool_name} {src_rbd_image_name} {src_rbd_image_snapshot_name} {dst_pool_name} {dst_rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_copy(args, src_pool_name, src_rbd_image_name, dst_pool_name, dst_rbd_image_name):
    """ copy RBD image {src_pool_name}/{src_rbd_image_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
    ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
    cmd = f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-copy.sh {src_pool_name} {src_rbd_image_name} {dst_pool_name} {dst_rbd_image_name}"
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            cmd)
    return stdout.splitlines(), stderr, ecode


def ceph_rbd_image_snapshot_exists(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ detect whether RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} exists """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-exists.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_snapshot_create(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ create RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-create.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ delete RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-delete.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode


def assert_entity_ownership(entities, project):
    """ assert that all entities belong to the given project """
    for i_entity in entities:
        assert i_entity.project_id == project.id, f"Entity belongs to expected project (id: {project.id})"

def get_source_keypairs(args):
    """ fetch and normalize source cloud keypairs from the mysqldump XML file on the migrator host """
    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host,
                                                              args.ceph_migrator_user,
                                                              args.ceph_migrator_sshkeyfile.name,
                                                              f"cat {args.source_keypair_xml_dump_file}")
    assert reply_ecode == 0, "Keypairs received"
    table_dictdata = xmltodict.parse(reply_stdout)
    table_data_dictdata = table_dictdata['mysqldump']['database']['table_data']['row']
    return normalize_table_data(table_data_dictdata)

def get_source_keypair(keypairs, keypair_name, user_id):
    """ return the keypair matching both name and user_id, or None """
    keypairs_selected = [ i_keypair for i_keypair in keypairs if i_keypair.get("name", "") == keypair_name and i_keypair.get("user_id", "") == user_id ]
    if keypairs_selected:
        return keypairs_selected[0]
    return None

def create_keypair(args, ostack_connection, keypair):
    """ create openstack keypair object """
    return ostack_connection.compute.create_keypair(name=get_migrated_resource_name(args, keypair['name']),
                                                    public_key=keypair['public_key'], type=keypair['type'])

def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_group, dst_project, recursion_stack=None):
    """ create openstack security group[s] """
    int_recursion_stack = {} if recursion_stack is None else recursion_stack
    int_sg = dst_ostack_conn.network.create_security_group(name=get_migrated_resource_name(args, src_security_group.name),
                                                           description=f"{src_security_group.description}, g1-to-g2-migrated(g1-id:{src_security_group.id})",
                                                           project_id=dst_project.id)
    int_recursion_stack[src_security_group.id] = int_sg.id

    for i_rule in src_security_group.security_group_rules:
        # browse security group rules
        i_mod_rule = trim_dict(i_rule, denied_keys=['id', 'project_id', 'tenant_id', 'revision_number', 'updated_at', 'created_at', 'tags', 'standard_attr_id', 'normalized_cidr'])
        i_mod_rule['security_group_id'] = int_sg.id
        i_mod_rule = {i_k: i_mod_rule[i_k] for i_k in i_mod_rule if i_mod_rule[i_k] is not None}
        if i_mod_rule.get('remote_group_id') is not None:
            if i_mod_rule['remote_group_id'] in int_recursion_stack:
                # keep reference to itself or known (already created) SGs
                i_mod_rule['remote_group_id'] = int_recursion_stack[i_mod_rule['remote_group_id']]
            # get linked source SG
            elif _src_sg := src_ostack_conn.network.find_security_group(i_mod_rule['remote_group_id']):
                if _dst_sg := dst_ostack_conn.network.find_security_group(get_migrated_resource_name(args, _src_sg.name),
                                                                          project_id=dst_project.id):
                    i_mod_rule['remote_group_id'] = _dst_sg.id
                else:
                    int_linked_sg = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
                                                           _src_sg, dst_project,
                                                           copy.deepcopy(int_recursion_stack))
                    i_mod_rule['remote_group_id'] = int_linked_sg.id
        try:
            dst_ostack_conn.network.create_security_group_rule(**i_mod_rule)
        except openstack.exceptions.ConflictException:
            # duplicate rule (e.g. a default SG rule) already exists in the destination
            pass

    return int_sg

def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project):
    """ duplicate all projects's openstack security group[s] """

    src_project_security_groups = tuple(src_ostack_conn.network.security_groups(project_id=src_project.id))

    for i_src_security_group in src_project_security_groups:
        j_dst_security_group_found = False
        for j_dst_security_group in tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id)):
            if get_migrated_resource_name(args, i_src_security_group.name) == j_dst_security_group.name and \
               i_src_security_group.id in j_dst_security_group.description:
                j_dst_security_group_found = True
        if not j_dst_security_group_found:
            create_security_groups(args, src_ostack_conn, dst_ostack_conn, i_src_security_group, dst_project)

    return src_project_security_groups, tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id))


def log_or_assert(args, msg, condition, trace_details=None):
    """ log, assert, dump state """
    if not condition:
        with open(args.exception_trace_file, "w") as file:
            file.write(f"{msg}\n{pprint.pformat(trace_details)}\n\n{locals()}\n")
    assert condition, msg
    args.logger.info(msg)
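
# Illustrative usage (hypothetical args with .logger and .exception_trace_file configured):
#   log_or_assert(args, "D.1 Destination project detected", dst_project is not None, locals())
# On success the message is logged; on failure trace details are dumped and AssertionError is raised.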


def wait_for_ostack_server_status(ostack_connection, server_name_or_id, server_status, timeout=600):
    """ poll a server until it reaches the expected status or timeout (seconds) elapses """
    int_start_timestamp = time.time()
    int_server = ostack_connection.compute.find_server(server_name_or_id)
    int_server_status = None
    while True:
        if time.time() > (int_start_timestamp + timeout):
            break
        int_server_status = ostack_connection.compute.find_server(int_server.id).status
        if int_server_status == server_status:
            break
        # pause between polls to avoid hammering the API
        time.sleep(1)

    return int_server_status

def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=120):
    """ poll a volume until it reaches the expected status or timeout (seconds) elapses """
    int_start_timestamp = time.time()
    int_volume = ostack_connection.block_storage.find_volume(volume_name_or_id)
    int_volume_status = None
    while True:
        if time.time() > (int_start_timestamp + timeout):
            break
        int_volume_status = ostack_connection.block_storage.find_volume(int_volume.id).status
        if int_volume_status == volume_status:
            break
        # pause between polls to avoid hammering the API
        time.sleep(1)

    return int_volume_status
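
# Illustrative usage of the polling helpers above (hypothetical connection and server name):
#   status = wait_for_ostack_server_status(dst_ostack_conn, 'migrated-vm1', 'SHUTOFF', timeout=300)
#   assert status == 'SHUTOFF', "Server reached the expected state"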

def server_detect_floating_address(server):
    """ return True if server has attached floating IP address otherwise False """
    for _, i_ip_details in server.addresses.items():
        for i_ip_detail in i_ip_details:
            if str(i_ip_detail.get('version')) == '4' and i_ip_detail.get('OS-EXT-IPS:type') == 'floating':
                return True
    return False
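
# Minimal sketch of the nova 'addresses' structure walked above (hypothetical values;
# keyed by network name, one dict per address):
#   server.addresses == {'net1': [{'addr': '10.0.0.5', 'version': 4, 'OS-EXT-IPS:type': 'fixed'},
#                                 {'addr': '198.51.100.7', 'version': 4, 'OS-EXT-IPS:type': 'floating'}]}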

def get_server_floating_ip_port(ostack_connection, server):
    """ return server's port where a floating IP can be attached, otherwise None """
    for i_port in ostack_connection.network.ports(device_id=server.id):
        for i_port_ip in i_port.fixed_ips:
            # rough private-address heuristic (prefix match only, not exact RFC 1918 ranges)
            for i_ip_prefix in ('192.', '10.', '172.'):
                if str(i_port_ip.get('ip_address')).startswith(i_ip_prefix):
                    return i_port
    return None


def get_server_block_device_mapping(args, server_volume_attachment, server_volume, server_root_device_name):
    """ return server block device mapping item """
    return {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                       'volume_attachment_id': server_volume_attachment.id,
                       'volume_id': server_volume.id,
                       'ceph_pool_name': args.source_ceph_cinder_pool_name,
                       'ceph_rbd_image_name': server_volume.id},
            'destination': {'volume_size': server_volume.size,
                            'volume_name': get_migrated_resource_name(args, server_volume.name),
                            'volume_description': server_volume.description,
                            'volume_id': None,
                            'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                            'device_name': os.path.basename(server_volume_attachment.device),
                            'volume_bootable': server_root_device_name == server_volume_attachment.device}}




def migrate_rbd_image(args, server_block_device_mapping):
    """ migrate G1 ceph RBD image to G2 ceph """

    ## G1: detect existing G1 RBD image
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
    source_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
                                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                                        server_block_device_mapping['source']['ceph_rbd_image_name'])
    log_or_assert(args, "G.1 Source OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
    log_or_assert(args, "G.1 Source OpenStack VM RBD image exists - single image returned",
                  source_server_rbd_images and len(source_server_rbd_images) == 1, locals())
    source_server_rbd_image = source_server_rbd_images[0]


    ## G2: find volume
    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
    destination_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
                                                                                   server_block_device_mapping['destination']['ceph_pool_name'],
                                                                                   server_block_device_mapping['destination']['volume_id'])
    log_or_assert(args, "G.2 Destination OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
    log_or_assert(args, "G.2 Destination OpenStack VM RBD image exists - single image returned",
                  destination_server_rbd_images and len(destination_server_rbd_images) == 1, locals())
    destination_server_rbd_image = destination_server_rbd_images[0]

    ## G1: create RBD image protected snapshot
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
    source_rbd_image_snapshot_name = f"g1-g2-migration-{source_server_rbd_image}"
    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
                                                                    server_block_device_mapping['source']['ceph_pool_name'],
                                                                    source_server_rbd_image,
                                                                    source_rbd_image_snapshot_name)
    log_or_assert(args,
                  "G.3 Source OpenStack VM RBD image has non-colliding snapshot " \
                  f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
                  ecode != 0, locals())

    stdout, stderr, ecode = ceph_rbd_image_snapshot_create(args,
                                                                    server_block_device_mapping['source']['ceph_pool_name'],
                                                                    source_server_rbd_image,
                                                                    source_rbd_image_snapshot_name)
    log_or_assert(args,
                  "G.4 Source OpenStack VM RBD image snapshot created " \
                  f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
                  ecode == 0, locals())


    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
                                                                    server_block_device_mapping['source']['ceph_pool_name'],
                                                                    source_server_rbd_image,
                                                                    source_rbd_image_snapshot_name)
    log_or_assert(args,
                  "G.5 Source OpenStack VM RBD image snapshot exists " \
                  f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
                  ecode == 0, locals())

    ## G2: delete RBD image
    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
    ## G2: confirm volume is deleted
    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
    stdout, stderr, ecode = ceph_rbd_image_delete(args,
                                                        server_block_device_mapping['destination']['ceph_pool_name'],
                                                        destination_server_rbd_image)
    log_or_assert(args,
                  f"G.6 Destination OpenStack VM RBD image deletion succeeded ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
                  ecode == 0, locals())
    stdout, stderr, ecode = ceph_rbd_image_exists(args,
                                                        server_block_device_mapping['destination']['ceph_pool_name'],
                                                        destination_server_rbd_image)
    log_or_assert(args,
                  f"G.7 Destination OpenStack VM RBD image does not exist ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
                  ecode != 0, locals())


    ## G1: clone from snapshot
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
    source_rbd_cloned_image_name = f"g1-g2-migration-{source_server_rbd_image}"
    stdout, stderr, ecode = ceph_rbd_image_clone(args,
                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                        source_server_rbd_image,
                                                        source_rbd_image_snapshot_name,
                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                        source_rbd_cloned_image_name)
    log_or_assert(args,
                  "G.8 Source OpenStack VM RBD image cloned succesfully " \
                  f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name} -> {server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                  ecode == 0, locals())
    stdout, stderr, ecode = ceph_rbd_image_exists(args,
                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                        source_rbd_cloned_image_name)
    log_or_assert(args,
                  f"G.9 Source OpenStack VM cloned RBD image exists ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                  ecode == 0, locals())

    ## G1: flatten cloned RBD image
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
    stdout, stderr, ecode = ceph_rbd_image_flatten(args,
                                                            server_block_device_mapping['source']['ceph_pool_name'],
                                                            source_rbd_cloned_image_name)
    log_or_assert(args,
                  f"G.10 Source OpenStack VM cloned RBD image flatten successfully ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                  ecode == 0, locals())

    ## G1->G2: copy RBD image to target pool
    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
    stdout, stderr, ecode = ceph_rbd_image_copy(args,
                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                        source_rbd_cloned_image_name,
                                                        server_block_device_mapping['destination']['ceph_pool_name'],
                                                        destination_server_rbd_image)
    log_or_assert(args,
                  "G.11 Source OpenStack VM RBD image copied G1 -> G2 succesfully" \
                  f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name} -> {server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image}",
                  ecode == 0, locals())
    stdout, stderr, ecode = ceph_rbd_image_exists(args,
                                                        server_block_device_mapping['destination']['ceph_pool_name'],
                                                        destination_server_rbd_image)
    log_or_assert(args,
                  f"G.12 Destination OpenStack VM RBD image exists ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
                  ecode == 0, locals())

    ## G1: delete cloned RBD image
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
    stdout, stderr, ecode = ceph_rbd_image_delete(args,
                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                        source_rbd_cloned_image_name)
    log_or_assert(args,
                  f"G.13 Source OpenStack VM RBD cloned image deletion succeeded ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                  ecode == 0, locals())
    stdout, stderr, ecode = ceph_rbd_image_exists(args,
                                                        server_block_device_mapping['source']['ceph_pool_name'],
                                                        source_rbd_cloned_image_name)
    log_or_assert(args,
                  f"G.14 Source OpenStack VM cloned RBD image does not exist anymore ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                  ecode != 0, locals())

    ## G1: remove created snapshot
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
                                                                    server_block_device_mapping['source']['ceph_pool_name'],
                                                                    source_server_rbd_image,
                                                                    source_rbd_image_snapshot_name)
    log_or_assert(args,
                  "G.15 Source OpenStack VM RBD image snapshot still exists " \
                  f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
                  ecode == 0, locals())
    stdout, stderr, ecode = ceph_rbd_image_snapshot_delete(args,
                                                                    server_block_device_mapping['source']['ceph_pool_name'],
                                                                    source_server_rbd_image,
                                                                    source_rbd_image_snapshot_name)
    log_or_assert(args,
                  "G.16 Source OpenStack VM RBD image snapshot deletion succeeeded " \
                  f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
                  ecode == 0, locals())
    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
                                                                    server_block_device_mapping['source']['ceph_pool_name'],
                                                                    source_server_rbd_image,
                                                                    source_rbd_image_snapshot_name)
    log_or_assert(args,
                  "G.17 Source OpenStack VM RBD image snapshot does not exist anymore " \
                  f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
                  ecode != 0, locals())



def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project, src_network_name):
    """ Create matching OpenStack networking (network, subnet, router) """
    # read source network details
    src_network = src_ostack_conn.network.find_network(src_network_name, project_id=src_project.id)
    # read matching subnets details
    src_subnets = [ src_ostack_conn.network.find_subnet(i_src_subnet_id) for i_src_subnet_id in src_network.subnet_ids ]
    # read linked routers
    src_network_router_ports = [ i_src_router_port for i_src_router_port in src_ostack_conn.list_ports(filters={'network_id': src_network.id}) if i_src_router_port.device_owner == 'network:router_interface' ]
    src_network_routers_subnets = [ (src_ostack_conn.network.find_router(router_port.device_id), [rp_fixed_ip['subnet_id'] for rp_fixed_ip in router_port.fixed_ips if 'subnet_id' in rp_fixed_ip])  for router_port in src_network_router_ports ]


    # read external network
    dst_ext_network = dst_ostack_conn.network.find_network(args.destination_ipv4_external_network)

    # create network
    dst_network_name = get_migrated_resource_name(args, src_network_name)
    dst_network = dst_ostack_conn.network.find_network(dst_network_name,
                                                       project_id=dst_project.id)
    if not dst_network:
        dst_network = dst_ostack_conn.network.create_network(name=dst_network_name,
                                                             project_id=dst_project.id,
                                                             mtu=src_network.mtu,
                                                             description=f"{src_network.description}, g1 migrated id:{src_network.id}",
                                                             port_security_enabled=src_network.is_port_security_enabled)

    # create subnets
    dst_subnets = []
    subnet_mapping = {}
    for i_src_subnet in src_subnets:
        i_dst_subnet_name = get_migrated_resource_name(args, i_src_subnet.name)
        i_dst_subnet = dst_ostack_conn.network.find_subnet(get_migrated_resource_name(args, i_src_subnet.name),
                                                           project_id=dst_project.id)
        if not i_dst_subnet:
            i_dst_subnet = dst_ostack_conn.network.create_subnet(network_id=dst_network.id,
                                                                 name=i_dst_subnet_name,
                                                                 cidr=i_src_subnet.cidr,
                                                                 ip_version=i_src_subnet.ip_version,
                                                                 enable_dhcp=i_src_subnet.is_dhcp_enabled,
                                                                 project_id=dst_project.id,
                                                                 allocation_pools=i_src_subnet.allocation_pools,
                                                                 gateway_ip=i_src_subnet.gateway_ip,
                                                                 host_routes=i_src_subnet.host_routes,
                                                                 dns_nameservers=i_src_subnet.dns_nameservers,
                                                                 description=f"{i_src_subnet.description}, g1 migrated id:{i_src_subnet.id}")
        subnet_mapping[i_src_subnet.id] = i_dst_subnet.id
        dst_subnets.append(i_dst_subnet)

    # create router(s) and associate with subnet(s) (if needed)
    dst_network_routers = []
    for i_src_network_router, i_src_network_router_subnets in src_network_routers_subnets:

        i_dst_network_router_name = get_migrated_resource_name(args, i_src_network_router.name)
        i_dst_network_router = dst_ostack_conn.network.find_router(i_dst_network_router_name,
                                                                   project_id=dst_project.id)
        if not i_dst_network_router:
            i_dst_network_router = dst_ostack_conn.network.create_router(name=i_dst_network_router_name,
                                                                         description=f"{i_src_network_router.description}, g1 migrated id:{i_src_network_router.id}",
                                                                         project_id=dst_project.id,
                                                                         external_gateway_info={"network_id": dst_ext_network.id})
            for i_src_network_router_subnet in i_src_network_router_subnets:
                # TODO: in principle there may also be foreign subnets; find a more general solution
                if i_src_network_router_subnet in subnet_mapping:
                    dst_ostack_conn.add_router_interface(i_dst_network_router, subnet_id=subnet_mapping[i_src_network_router_subnet])

        dst_network_routers.append(i_dst_network_router)

    return dst_network, dst_subnets, dst_network_routers


def describe_server_network_connection(args, dst_ostack_conn, netaddr_dict):
    """ build a server network attachment: a port with the original fixed IP when possible, else a plain network uuid """
    # netaddr_dict{ 'dst-network': Network,
    #               'src-network-addresses': {'network-name': <source-network-name>,
    #                                         'addresses': [ ... ]} }
    fixed_port = None
    dst_network = netaddr_dict['dst-network']
    source_server_fixed_addresses = [i_addr['addr'] for i_addr in netaddr_dict['src-network-addresses']['addresses'] if i_addr.get('OS-EXT-IPS:type') == 'fixed']
    if len(source_server_fixed_addresses) == 1 and len(dst_network.subnet_ids) == 1:
        try:
            fixed_port = dst_ostack_conn.network.create_port(name=get_migrated_resource_name(args, "unknown"),
                                                             description="2",
                                                             network_id=dst_network.id,
                                                             fixed_ips=[{"ip_address": source_server_fixed_addresses[0],
                                                                         "subnet_id": dst_network.subnet_ids[0]}])
        except openstack.exceptions.SDKException:
            # port pre-creation is best-effort; fall back to plain network attachment
            pass

    if fixed_port:
        return {'port': fixed_port.id}
    return {'uuid': dst_network.id}
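
# Illustrative usage (hypothetical): the returned dict plugs directly into a nova
# networks=[...] item, which accepts either {'port': <port-id>} or {'uuid': <network-id>}:
#   networks = [describe_server_network_connection(args, dst_ostack_conn, i_netaddr_dict)]
#   dst_ostack_conn.compute.create_server(name='migrated-vm1', networks=networks, ...)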