#!/usr/bin/env python3
"""
OpenStack project multicloud migrator


Usage example:
 * ./project-migrator.py --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc
                         --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc
                         --project-name meta-cloud-new-openstack
                         --validation-a-source-server-id <>
                         --ceph-migrator-sshkeyfile $HOME/.ssh/id_rsa.LenovoThinkCentreE73
 * ./project-migrator.py --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc --project-name meta-cloud-new-openstack --validation-a-source-server-id <> --ceph-migrator-sshkeyfile $HOME/.ssh/id_rsa.LenovoThinkCentreE73 --explicit-server-names freznicek-rook-internal-external-20-worker-1
 * 

import paramiko
import keystoneauth1.session
from keystoneauth1.identity import v3
from openstack import connection
from keystoneauth1 import session

def wait_for_keypress(msg="Press Enter to continue..."):
    """ interactive breakpoint: block until the user presses Enter """
    return input(msg)

def normalize_servers(servers):
    """ list of server names/IDs separated by space of comma returned as list of strings or None """
    if isinstance(servers, str) and servers:
        return servers.replace(","," ").split()
    return None
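
# Illustrative behavior of normalize_servers() (hypothetical server names):
#   normalize_servers("vm-a,vm-b vm-c")  ->  ['vm-a', 'vm-b', 'vm-c']
#   normalize_servers("")                ->  None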

def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
    """ transform input dictionary and filter its keys with allowed_keys and denied_keys sequences """
    int_allowed_keys = allowed_keys if allowed_keys else tuple()
    int_denied_keys = denied_keys if denied_keys else tuple()
    if int_allowed_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key in int_allowed_keys}
    elif int_denied_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key not in int_denied_keys}
    return dict_data
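
# Illustrative behavior of trim_dict() (hypothetical keys):
#   trim_dict({'a': 1, 'b': 2}, allowed_keys=('a',))  ->  {'a': 1}
#   trim_dict({'a': 1, 'b': 2}, denied_keys=('a',))   ->  {'b': 2}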


def get_destination_network(source_network):
    """ LUT for networks """
    network_mapping = {
        # shared
        "78-128-250-pers-proj-net" :  "internal-ipv4-general-private",
        "147-251-115-pers-proj-net" : "internal-ipv4-general-private",
        "public-muni-v6-432" :        "external-ipv6-general-public",
        # external
        "public-muni-147-251-21-GROUP": "external-ipv4-general-public",
        "public-cesnet-78-128-250-PERSONAL": "external-ipv4-general-public",
        "public-cesnet-78-128-251-GROUP": "external-ipv4-general-public",
        "provider-public-cerit-sc-147-251-253": "external-ipv4-general-public",
        "public-muni-147-251-115-PERSONAL": "external-ipv4-general-public",
        "public-muni-147-251-124-GROUP": "external-ipv4-general-public",
        "public-cesnet-195-113-167-GROUP": "external-ipv4-general-public",
        "public-muni-147-251-11-128-254": "external-ipv4-general-public",
        "public-muni-CERIT-FI-147-251-88-132-254": "external-ipv4-general-public",
        "public-muni-CSIRT-MU-217-69-96-64-240": "external-ipv4-general-public",
        "public-muni-csirt-147-251-125-16-31": "external-ipv4-general-public",
        "provider-public-cerit-sc-147-251-254": "external-ipv4-general-public",
        # group project internal network
        "group-project-network": "group-project-network"
        }
    return network_mapping.get(source_network)
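
# Illustrative lookups (network names taken from the mapping above):
#   get_destination_network("public-muni-v6-432")  ->  "external-ipv6-general-public"
#   get_destination_network("no-such-network")     ->  None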

def get_destination_subnet(source_subnet):
    """ LUT for networks """
    subnet_mapping = {
        # TODO: shared

        # group project internal network
        "group-project-network-subnet": "group-project-network-subnet"
        }
    return subnet_mapping.get(source_subnet)

def get_destination_router(source_router):
    """ LUT for networks """
    router_mapping = {
        # TODO: shared

        # group project internal network
        "router": "group-project-router"
        }
    return router_mapping.get(source_router)


def get_destination_flavor(source_flavor):
    """ LUT for flavors """
    flavor_mapping = {
        #'eph.16cores-60ram' # no need to map, not used by any project in G1
        #'eph.8cores-30ram': 'c2.8core-30ram' # no need to map, not used by any project in G1
        #'eph.8cores-60ram': 'c3.8core-60ram' # no need to map, not used by any project in G1
        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram', # disk size mismatch: G2 has 80 GB instead of 35
        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram', # ok
        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram', # disk size mismatch: G2 has 80 GB instead of 35
        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram', # ok
        #'hdn.medium-ssd-ephem': # no need to map, not used by any project in G1
        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500', # missing in G2 and needed
        'hpc.16core-128ram': 'c3.16core-128ram', # missing in G2 and needed
        'hpc.16core-256ram': 'c3.16core-256ram', # missing in G2 and needed
        'hpc.16core-32ram': 'c2.16core-30ram', # ok
        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk', # missing in G2 and needed
        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd', # missing in G2 and needed
        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram', # ok
        'hpc.18core-48ram': '', # missing in G2 and needed
        'hpc.18core-64ram-dukan': 'c2.24core-60ram', # no need to handle
        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd', # no need to handle
        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500', # missing in G2 and needed
        'hpc.30core-256ram': 'c3.30core-256ram', # missing in G2 and needed
        'hpc.30core-64ram': 'c3.32core-60ram', # G2 flavor has 2 more CPUs
        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram', # ok
        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # ok
        'hpc.4core-4ram': 'e1.medium', # no need to handle
        'hpc.8core-128ram': 'c3.8core-128ram', # missing in G2 and needed
        'hpc.8core-16ram': 'c2.8core-16ram', # ok
        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram', # no need to handle
        'hpc.8core-256ram': None, # no need to handle
        'hpc.8core-32ram-dukan': 'c2.8core-30ram', # no need to handle
        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram', # ok
        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram', # ok
        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram', # ok
        'hpc.8core-8ram': 'e1.1xlarge', # G2 flavor has a 20 GB smaller disk
        'hpc.hdh-ephem': 'hpc.hdh', # missing in G2 and needed
        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500', # not needed
        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # not needed
        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # not needed
        'hpc.large': 'g2.3xlarge', # ok
        'hpc.medium': 'c2.8core-30ram', # ok
        'hpc.small': 'c2.4core-16ram', # ok
        'hpc.xlarge': None, # missing in G2
        'hpc.xlarge-memory': 'c3.xlarge-memory', # missing in G2
        'standard.16core-32ram': 'g2.2xlarge', # ok
        'standard.20core-128ram': 'e1.20core-128ram', # not needed
        'standard.20core-256ram': 'e1.20core-256ram', # missing in G2
        'standard.2core-16ram': 'c3.2core-16ram', # ok
        'standard.large': 'e1.large', # ok, alternatively c3.4core-8ram
        'standard.medium': 'e1.medium', # 2 more CPUs
        'standard.memory': 'c3.2core-30ram', # alternatively also c2.2core-30ram
        'standard.one-to-many': 'c3.24core-60ram', # G2 flavor has 4 more CPUs
        'standard.small': 'e1.small', # 2x more RAM and CPUs in G2
        'standard.tiny': 'e1.tiny', # 2x more RAM and CPUs in G2
        'standard.xlarge': 'e1.2xlarge', # G2 flavor has 4 more CPUs
        'standard.xlarge-cpu': 'e1.2xlarge', # ok
        'standard.xxlarge': 'c2.8core-30ram', # ok
        'standard.xxxlarge': 'c3.8core-60ram'  # ok
    }
    assert source_flavor in flavor_mapping, "Source flavor can be mapped to a destination one"
    assert flavor_mapping[source_flavor], "Source flavor mapping is not valid"
    return flavor_mapping[source_flavor]
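
# Illustrative lookups (flavor names taken from the mapping above):
#   get_destination_flavor('hpc.small')   ->  'c2.4core-16ram'
#   get_destination_flavor('hpc.xlarge')  ->  AssertionError (maps to None)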

def normalize_table_data_field(data_field):
    """ normalize single data field (single data insert) """
    int_dict = {}
    i_name_key = '@name'
    for i_data_field_item in data_field:
        i_value_key = [ i_k for i_k in i_data_field_item.keys() if i_k != i_name_key][0]
        int_dict[i_data_field_item[i_name_key]] = i_data_field_item[i_value_key]
    return int_dict

def normalize_table_data(data):
    """ normalize whole table data """
    int_list = []
    for i_data_field in data:
        int_list.append(normalize_table_data_field(i_data_field['field']))
    return int_list
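
# Illustrative input/output, assuming xmltodict-parsed mysqldump XML where
# <field name="...">value</field> becomes {'@name': ..., '#text': ...}:
#   normalize_table_data([{'field': [{'@name': 'name',    '#text': 'mykey'},
#                                    {'@name': 'user_id', '#text': 'abc123'}]}])
#   ->  [{'name': 'mykey', 'user_id': 'abc123'}]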

def get_openrc(file_handle):
    """ parse and return OpenRC file """
    openrc_vars = {}

    for line in file_handle:
        match = re.match(r'^export (\w+)=(.+)$', line.strip())
        if match:
            openrc_vars[match.group(1)] = match.group(2).strip('"')
    return openrc_vars
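
# Illustrative parsing (hypothetical OpenRC content): the line
#   export OS_AUTH_URL="https://identity.example.org/v3"
# yields {'OS_AUTH_URL': 'https://identity.example.org/v3'}.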


def get_ostack_connection(openrc_vars):
    """ """
    auth_args = {
        'auth_url': openrc_vars.get('OS_AUTH_URL'),
        'username': openrc_vars.get('OS_USERNAME'),
        'password': openrc_vars.get('OS_PASSWORD'),
        'project_name': openrc_vars.get('OS_PROJECT_NAME'),
        'project_domain_name': openrc_vars.get('OS_PROJECT_DOMAIN_NAME'),
        'user_domain_name': openrc_vars.get('OS_USER_DOMAIN_NAME'),
        'project_domain_id': openrc_vars.get('OS_PROJECT_DOMAIN_ID'),
        'user_domain_id': openrc_vars.get('OS_USER_DOMAIN_ID'),
    }
    connection_args = {
        'compute_api_version': openrc_vars.get('OS_COMPUTE_API_VERSION'),
        'identity_api_version': openrc_vars.get('OS_IDENTITY_API_VERSION'),
        'volume_api_version': openrc_vars.get('OS_VOLUME_API_VERSION')
    }
    auth = v3.Password(**auth_args)
    ostack_sess = session.Session(auth=auth)
    ostack_conn = connection.Connection(session=ostack_sess, **connection_args)
    return ostack_conn

def get_ostack_project(ostack_connection, project_name):
    """ find an OpenStack project by name """
    project = None
    for i_project in ostack_connection.list_projects():
        if i_project.name == project_name:
            project = i_project
    return project

def get_ostack_project_type(ostack_connection, project):
    """ detect project type, return 'group' or 'personal' """
    if project.name in [ i_user.name for i_user in ostack_connection.list_users() ]:
        return "personal"
    return "group"

def get_ostack_project_security_groups(ostack_connection, project=None):
    """ get security groups owned by the project (all security groups when project is None) """
    security_groups = []
    if project:
        for i_security_group in ostack_connection.network.security_groups():
            if i_security_group.tenant_id == project.id:
                security_groups.append(i_security_group)
        return security_groups
    else:
        return tuple(ostack_connection.network.security_groups())

def get_ostack_project_keypairs(ostack_connection, project=None):
    return ostack_connection.list_keypairs()

def get_ostack_project_keypairs2(ostack_connection, project=None):
    return list(ostack_connection.compute.keypairs())

def get_ostack_project_servers(ostack_connection, project=None):
    return tuple(ostack_connection.compute.servers())

def get_ostack_project_volumes(ostack_connection, project=None):
    return ostack_connection.block_storage.volumes()

def get_ostack_project_flavors(ostack_connection, project=None):
    return tuple(ostack_connection.compute.flavors())

def get_resource_details(resources):
    """ inspect resources """
    for i_resource in resources:
        print(i_resource)
        pprint.pprint(i_resource)

def remote_cmd_exec(hostname, username, key_filename, command):
    """ execute a remote command; return stdout, stderr and exit code, or raise an Exception """
    # Create SSH client
    ssh_client = paramiko.SSHClient()
    # Automatically add untrusted hosts
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    try:
        # Connect to the remote host
        pkey = paramiko.RSAKey.from_private_key_file(key_filename)
        ssh_client.connect(hostname, username=username, pkey=pkey, look_for_keys=False)

        # Execute the command, read the output and close
        stdin, stdout, stderr = ssh_client.exec_command(command)
        output = stdout.read().decode().strip()
        error = stderr.read().decode().strip()
        ecode = stdout.channel.recv_exit_status()
        return output, error, ecode
    finally:
        ssh_client.close()
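
# Illustrative usage of remote_cmd_exec() (hypothetical host and key file):
#   out, err, ecode = remote_cmd_exec('ceph-node.example.org', 'root',
#                                     '/root/.ssh/id_rsa', 'uname -a')
#   # ecode == 0 and 'Linux' in out on success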
def get_ceph_client_name(args, ceph_src_pool_name, ceph_dst_pool_name=None):
    """ select the ceph auth client name appropriate for the manipulated pool """
    int_pool_name = ceph_dst_pool_name if ceph_dst_pool_name else ceph_src_pool_name

    return "client.cinder" if int_pool_name in (args.source_ceph_cinder_pool_name,
                                                args.source_ceph_ephemeral_pool_name) else "client.migrator"

def ceph_rbd_images_list(args, pool_name):
    """ """
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"/root/migrator/ceph-rbd-images-list.sh {pool_name}")
    assert stdout, f"RBD pool ({pool_name}) images received successfully (non-empty RBD list)"
    assert ecode == 0, f"RBD pool ({pool_name}) images received successfully (ecode)"
    return stdout.splitlines()

def ceph_rbd_image_info(args, pool_name, rbd_image_name):
    """ get ceph RBD image information """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-info.sh {pool_name} {rbd_image_name}")
    # callers expect a dict with a 'size' key in bytes; assumption: the helper
    # script emits `rbd info --format json`-style JSON on stdout
    return json.loads(stdout) if ecode == 0 and stdout else None

def ceph_rbd_image_exists(args, pool_name, rbd_image_name):
    """ detect whether RBD image {pool_name}/{rbd_image_name} exists """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-exists.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_delete(args, pool_name, rbd_image_name):
    """ delete RBD image {pool_name}/{rbd_image_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-delete.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_flatten(args, pool_name, rbd_image_name):
    """ flatten RBD image {pool_name}/{rbd_image_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-flatten.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_snapshot_name,
                         dst_pool_name, dst_rbd_image_name):
    """ clone RBD image {src_pool_name}/{src_rbd_image_name}@{src_rbd_image_snapshot_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
    ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-clone.sh {src_pool_name} {src_rbd_image_name} {src_rbd_image_snapshot_name} {dst_pool_name} {dst_rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_copy(args, src_pool_name, src_rbd_image_name, dst_pool_name, dst_rbd_image_name):
    """ copy RBD image {src_pool_name}/{src_rbd_image_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
    ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
    cmd = f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-copy.sh {src_pool_name} {src_rbd_image_name} {dst_pool_name} {dst_rbd_image_name}"
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            cmd)
    return stdout.splitlines(), stderr, ecode


def ceph_rbd_image_snapshot_exists(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ detect whether RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} exists """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-exists.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_snapshot_create(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ create RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-create.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ delete RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-delete.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode


def assert_entity_ownership(entities, project):
    """ assert that all entities belong to the given project """
    for i_entity in entities:
        assert i_entity.project_id == project.id, f"Entity belongs to expected project (name:{project.name}, id: {project.id})"

def get_source_keypairs(args):
    """ """
    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host,
                                                              args.ceph_migrator_user,
                                                              args.ceph_migrator_sshkeyfile.name,
                                                              f"cat {args.source_keypair_xml_dump_file}")
    assert reply_ecode == 0, "Keypairs received"
    table_dictdata = xmltodict.parse(reply_stdout)
    table_data_dictdata = table_dictdata['mysqldump']['database']['table_data']['row']
    return normalize_table_data(table_data_dictdata)

def get_source_keypair(keypairs, keypair_name, user_id):
    """ """
    keypairs_selected = [ i_keypair for i_keypair in keypairs if i_keypair.get("name", "") == keypair_name and i_keypair.get("user_id", "") == user_id ]
    if keypairs_selected:
        return keypairs_selected[0]
    return None

def create_keypair(ostack_connection, keypair):
    """ """
    return ostack_connection.compute.create_keypair(name=keypair['name'], public_key=keypair['public_key'], type=keypair['type'])

def log_or_assert(args, msg, condition, trace_details=None):
    """ log msg when condition holds, otherwise dump state to the trace file and assert """
    if not condition:
        with open(args.exception_trace_file, "w") as file:
            file.write(f"{msg}\n{pprint.pformat(trace_details)}\n\n{locals()}\n")
    assert condition, msg
    args.logger.info(msg)
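
# Illustrative usage of log_or_assert():
#   log_or_assert(args, "X.1 Project exists", project)
# logs "X.1 Project exists" when project is truthy; otherwise it dumps state
# into args.exception_trace_file and raises AssertionError.
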
def wait_for_ostack_server_status(ostack_connection, server_name_or_id, server_status, timeout=120):
    """ poll server status until it reaches server_status or timeout [s] elapses; return the last status seen """
    int_start_timestamp = time.time()
    int_server = ostack_connection.compute.find_server(server_name_or_id)
    int_server_status = None
    while True:
        if time.time() > (int_start_timestamp + timeout):
            break
        int_server_status = ostack_connection.compute.find_server(int_server.id).status
        if int_server_status == server_status:
            break
        # avoid busy-polling the API
        time.sleep(5)

    return int_server_status
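
# Illustrative usage of wait_for_ostack_server_status() (hypothetical ID):
#   wait_for_ostack_server_status(conn, '<server-uuid>', 'SHUTOFF') == 'SHUTOFF'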

def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=120):
    """ poll volume status until it reaches volume_status or timeout [s] elapses; return the last status seen """
    int_start_timestamp = time.time()
    int_volume = ostack_connection.block_storage.find_volume(volume_name_or_id)
    int_volume_status = None
    while True:
        if time.time() > (int_start_timestamp + timeout):
            break
        int_volume_status = ostack_connection.block_storage.find_volume(int_volume.id).status
        if int_volume_status == volume_status:
            break
        # avoid busy-polling the API
        time.sleep(5)

    return int_volume_status


def main(args):
    """ """
    # connect to source cloud
    source_migrator_openrc = get_openrc(args.source_openrc)
    source_migrator_conn = get_ostack_connection(source_migrator_openrc)
    args.logger.info("A.1 Source OpenStack cloud connected as migrator user")
    destination_migrator_openrc = get_openrc(args.destination_openrc)
    destination_migrator_conn = get_ostack_connection(destination_migrator_openrc)
    args.logger.info("A.2 Destination OpenStack cloud connected as migrator user")

    # check project exists in source and destination
    source_project = get_ostack_project(source_migrator_conn, args.project_name)
    log_or_assert(args, f"B.1 Source OpenStack cloud project exists", source_project)
    source_project_type = get_ostack_project_type(source_migrator_conn, source_project)
    log_or_assert(args, f"B.2 Source OpenStack cloud project type is {source_project_type}",
                  source_project_type)

    destination_project = get_ostack_project(destination_migrator_conn, args.project_name)
    log_or_assert(args, f"B.10 Destination OpenStack cloud project exists", destination_project)
    destination_project_type = get_ostack_project_type(destination_migrator_conn, destination_project)
    log_or_assert(args, f"B.11 Destination OpenStack cloud project type is {destination_project_type}",
                  destination_project_type)

    log_or_assert(args, f"B.12 Source and destination project types match",
                  source_project_type == destination_project_type)

    # check user context switching & quotas
    source_project_conn = get_ostack_connection(source_migrator_openrc | {'OS_PROJECT_NAME': source_project.name})
    #source_project_quotas = source_project_conn.get_compute_quotas(source_project.id)
    #assert_msg = f"Context switching to source OpenStack cloud project {source_project.name} succeeded (id:{source_project.id})"
    #assert source_project_quotas and source_project_quotas.id == source_project.id, assert_msg
    #args.logger.info(f"C.2 {assert_msg}")

    destination_project_conn = get_ostack_connection(destination_migrator_openrc | {'OS_PROJECT_NAME': destination_project.name})
    #destination_project_quotas = destination_project_conn.get_compute_quotas(destination_project.id)
    #assert_msg = f"Context switching to destination OpenStack cloud project {destination_project.name} succeeded (id:{destination_project.id})"
    #assert destination_project_quotas and destination_project_quotas.id == destination_project.id, assert_msg
    #args.logger.info(f"C.2 {assert_msg}")
    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
                                                              args.ceph_migrator_sshkeyfile.name, 'uname -a')
    log_or_assert(args, f"D.1 Migrator host is reachable", 'Linux' in reply_stdout and reply_ecode == 0)

    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
                                                              args.ceph_migrator_sshkeyfile.name, '/root/migrator/ceph-accessible.sh')
    log_or_assert(args, f"D.2 Ceph is available from the migrator host", reply_ecode == 0)

    source_rbd_images = {args.source_ceph_ephemeral_pool_name: None,
                         args.source_ceph_cinder_pool_name: None}
    for i_pool_name in source_rbd_images.keys():
        source_rbd_images[i_pool_name] = ceph_rbd_images_list(args, i_pool_name)
        log_or_assert(args, f"D.3 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name])

    source_keypairs = get_source_keypairs(args)
    log_or_assert(args, f"D.4 Source OpenStack cloud keypairs received.", source_keypairs)

    # get source/destination entities in the project
    source_project_servers = get_ostack_project_servers(source_project_conn, source_project)
    args.logger.info(f"E.1 Source OpenStack cloud servers received")
    assert_entity_ownership(source_project_servers, source_project)
    args.logger.info(f"E.2 Source OpenStack cloud project {source_project.name} has {len(source_project_servers)} servers.")
    source_project_flavors = get_ostack_project_flavors(source_project_conn)
    log_or_assert(args, f"E.4 Source OpenStack flavor list received", source_project_flavors)

    destination_project_servers = get_ostack_project_servers(destination_project_conn, destination_project)
    args.logger.info(f"E.10 Destination OpenStack cloud servers received")
    assert_entity_ownership(destination_project_servers, destination_project)
    args.logger.info(f"E.11 Destination OpenStack cloud project {destination_project.name} has {len(destination_project_servers)} servers.")

    destination_project_flavors = get_ostack_project_flavors(destination_project_conn)
    log_or_assert(args, f"E.12 Destination OpenStack flavor list received", destination_project_flavors)

    log_or_assert(args, f"E.20 Source OpenStack VM ID validation succeeded",
                  args.validation_a_source_server_id in [i_server.id for i_server in source_project_servers])

    destination_image = destination_project_conn.image.find_image(args.destination_bootable_volume_image_name)
    log_or_assert(args, f"E.30 Destination image found and received", destination_image)

    args.logger.info(f"F.0 Main looping started")
    args.logger.info(f"F.0 Source VM servers: {[ i_source_server.name for i_source_server in source_project_servers]}")
    for i_source_server in source_project_servers:
        i_source_server_detail = source_project_conn.compute.find_server(i_source_server.id)

        if args.explicit_server_names and i_source_server.name not in args.explicit_server_names:
            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to --explicit-server-names={args.explicit_server_names}")
            continue

        args.logger.info(f"F.1 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, keypair: {i_source_server_detail.key_name}, flavor: {i_source_server_detail.flavor}, sec-groups:{i_source_server_detail.security_groups}, root_device_name: {i_source_server_detail.root_device_name}, block_device_mapping: {i_source_server_detail.block_device_mapping}, attached-volumes: {i_source_server_detail.attached_volumes}")

        # network, subnet detection
        i_source_server_network_names = i_source_server_detail.addresses.keys()
        i_destination_server_networks = []
        for i_source_network_name in i_source_server_network_names:
            i_destination_network_name = get_destination_network(i_source_network_name)
            log_or_assert(args, f"F.2 Source to Destination network mapping succeeeded ({i_source_network_name}->{i_destination_network_name})", i_destination_network_name)

            i_destination_network = destination_project_conn.network.find_network(i_destination_network_name)
            log_or_assert(args, f"F.3 Destination network exists ({i_destination_network})", i_destination_network)
            i_destination_server_networks.append(i_destination_network)

        # flavor detection
        i_source_server_flavor_name = i_source_server_detail.flavor.name
        i_destination_server_flavor_name = get_destination_flavor(i_source_server_flavor_name)
        log_or_assert(args, f"F.5 Source to Destination flavor mapping succeeeded ({i_source_server_flavor_name}->{i_destination_server_flavor_name})",
                      i_destination_server_flavor_name)

        log_or_assert(args, f"F.6 Destination OpenStack flavor exists",
                      [ i_flavor for i_flavor in destination_project_flavors if i_flavor.name == i_destination_server_flavor_name ])
        i_source_server_keypair = get_source_keypair(source_keypairs, i_source_server_detail.key_name, i_source_server_detail.user_id)
        log_or_assert(args, f"F.7 Source OpenStack server keypair found ({i_source_server_keypair})", i_source_server_keypair)

        i_destination_server_keypair = None
        if i_destination_server_keypairs := [i_keypair for i_keypair in destination_project_conn.list_keypairs() if i_keypair.name == i_source_server_detail.key_name]:
            i_destination_server_keypair = i_destination_server_keypairs[0]
            log_or_assert(args, f"F.8 Destination OpenStack server keypair found already ({i_destination_server_keypair})", i_destination_server_keypair)
        else:
            i_destination_server_keypair = create_keypair(destination_project_conn, i_source_server_keypair)
            args.logger.info("F.8 Destination OpenStack server keypair created")
        log_or_assert(args, f"F.9 Destination OpenStack server keypair exists ({i_destination_server_keypair})", i_destination_server_keypair)

        # security group
        source_project_security_groups = get_ostack_project_security_groups(source_project_conn, source_project)
        destination_project_security_groups = get_ostack_project_security_groups(destination_project_conn, destination_project)

        i_destination_server_security_groups = []

        for i_source_server_security_group in i_source_server_detail.security_groups:
            i_destination_server_security_group = None
            if i_destination_server_security_group := destination_project_conn.network.find_security_group(i_source_server_security_group["name"], tenant_id=destination_project.id):
                log_or_assert(args, f"F.10 Destination OpenStack server security group found already ({i_destination_server_security_group})",
                              i_destination_server_security_group)
            else:
                i_destination_server_security_group = create_security_group(destination_project_conn, i_source_server_security_group)
                log_or_assert(args, f"F.10 Destination OpenStack server security group created ({i_destination_server_security_group})",
                              i_destination_server_security_group)
            log_or_assert(args, f"F.11 Destination OpenStack server security group exists ({i_destination_server_security_group})",
                            i_destination_server_security_group)
            i_destination_server_security_groups.append(i_destination_server_security_group)
        log_or_assert(args, f"F.12 Destination OpenStack server - all security groups exist {i_destination_server_security_groups}",
                      len(i_destination_server_security_groups) == len(i_source_server_detail.security_groups))
        i_server_block_device_mappings = [ ]
        # schema: [ {}, ... ] where each {} is the following dict:
        # { 'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image', 'volume_attachment_id': <>, 'volume_id': <>,
        #              'ceph_pool_name': <pool-name>, 'ceph_rbd_image_name': <rbd-image-name>, 'ceph_rbd_image_size': <size-gb>}
        #             OR
        #             {'block_storage_type': 'ceph-rbd-image', 'ceph_pool_name': <pool-name>,
        #              'ceph_rbd_image_name': <rbd-image-name>, 'ceph_rbd_image_size': <size-gb>},
        #   'destination': {'volume_size': <size-gb>, 'volume_id': <vol-id>, 'device_name': <dev-name>, 'volume_bootable': True/False}
        # }
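        # Illustrative populated entry (hypothetical IDs and sizes):
        # { 'source': {'block_storage_type': 'ceph-rbd-image',
        #              'ceph_pool_name': 'prod-ephemeral-vms',
        #              'ceph_rbd_image_name': '<server-uuid>_disk',
        #              'ceph_rbd_image_size': 80},
        #   'destination': {'volume_size': 80, 'volume_name': '<server-uuid>_disk',
        #                   'volume_description': 'RBD prod-ephemeral-vms/<server-uuid>_disk',
        #                   'volume_id': None, 'ceph_pool_name': 'cloud-cinder-volumes-prod-brno',
        #                   'device_name': 'vda', 'volume_bootable': True} }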

        i_source_server_root_device_name = i_source_server_detail.root_device_name
        log_or_assert(args, f"F.20 Source OpenStack server - root device name received ({i_source_server_root_device_name})",
                      i_source_server_root_device_name)

        i_source_server_volume_attachments = tuple(source_project_conn.compute.volume_attachments(i_source_server_detail.id))
        args.logger.info(f"F.21 Source OpenStack server - volume attachments received {i_source_server_volume_attachments}")
        i_source_ceph_ephemeral_rbd_image = None
        if i_source_server_root_device_name in [ i_source_server_attachment.device for i_source_server_attachment in i_source_server_volume_attachments ]:
            args.logger.info(f"F.22 Source OpenStack server - one of attached volume is attached as the root partition")

            # populate i_server_block_device_mappings
            for i_source_server_volume_attachment in i_source_server_volume_attachments:
                i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
                i_server_block_device_mappings.append({'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                                                                  'volume_attachment_id': i_source_server_volume_attachment.id,
                                                                  'volume_id': i_server_volume.id,
                                                                  'ceph_pool_name': args.source_ceph_cinder_pool_name,
                                                                  'ceph_rbd_image_name': i_server_volume.id},
                                                       'destination': {'volume_size': i_server_volume.size,
                                                                       'volume_name': i_server_volume.name,
                                                                       'volume_description': i_server_volume.description,
                                                                       'volume_id': None,
                                                                       'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                                       'device_name': os.path.basename(i_source_server_volume_attachment.device),
                                                                       'volume_bootable': i_source_server_root_device_name == i_source_server_volume_attachment.device}})
        else:
            args.logger.info(f"F.22 Source OpenStack server - none of attached volumes is attached as the root partition. Seeking for root partition RBD image")
            if f"{i_source_server_detail.id}_disk" in source_rbd_images[args.source_ceph_ephemeral_pool_name]:
                i_source_ceph_ephemeral_rbd_image = f"{i_source_server_detail.id}_disk"
                args.logger.info(f"F.23 Source OpenStack server - Root partition found as RBD image {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}")
                # get rbd image info / size
                i_source_ceph_ephemeral_rbd_image_data = ceph_rbd_image_info(args, args.source_ceph_ephemeral_pool_name,
                                                                            i_source_ceph_ephemeral_rbd_image)
                log_or_assert(args, f"F.24 Source OpenStack ceph RBD image information received",
                                i_source_ceph_ephemeral_rbd_image_data and 'size' in i_source_ceph_ephemeral_rbd_image_data)
                i_source_ceph_ephemeral_rbd_image_size = math.ceil(i_source_ceph_ephemeral_rbd_image_data['size'] / 1024 / 1024 / 1024)
                log_or_assert(args, f"F.25 Source OpenStack ceph RBD image size calculated",
                              i_source_ceph_ephemeral_rbd_image_size)


                # populate i_server_block_device_mappings
                ## initial disk
                i_server_block_device_mappings.append({'source': {'block_storage_type': 'ceph-rbd-image',
                                                                  'ceph_pool_name': args.source_ceph_ephemeral_pool_name,
                                                                  'ceph_rbd_image_name': i_source_ceph_ephemeral_rbd_image,
                                                                  'ceph_rbd_image_size': i_source_ceph_ephemeral_rbd_image_size},
                                                       'destination': {'volume_size': i_source_ceph_ephemeral_rbd_image_size,
                                                                       'volume_name': i_source_ceph_ephemeral_rbd_image,
                                                                       'volume_description': f"RBD {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}",
                                                                       'volume_id': None,
                                                                       'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                                       'device_name': os.path.basename(i_source_server_root_device_name),
                                                                       'volume_bootable': True}})

                ## other disks attached to VM
                for i_source_server_volume_attachment in i_source_server_volume_attachments:
                    i_server_volume = source_project_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
                    i_server_block_device_mappings.append({'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                                                                      'volume_attachment_id': i_source_server_volume_attachment.id,
                                                                      'volume_id': i_server_volume.id,
                                                                      'ceph_pool_name': args.source_ceph_cinder_pool_name,
                                                                      'ceph_rbd_image_name': i_server_volume.id},
                                                           'destination': {'volume_size': i_server_volume.size,
                                                                           'volume_name': i_server_volume.name,
                                                                           'volume_description': i_server_volume.description,
                                                                           'volume_id': None,
                                                                           'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                                           'device_name': os.path.basename(i_source_server_volume_attachment.device),
                                                                           'volume_bootable': i_source_server_root_device_name == i_source_server_volume_attachment.device}})



        log_or_assert(args, f"F.26 Source OpenStack server - root partition detected",
                      i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['source'])
        log_or_assert(args, f"F.27 Destination OpenStack server - root partition details generated",
                      i_server_block_device_mappings and i_server_block_device_mappings[0] and i_server_block_device_mappings[0]['destination'])

        pprint.pprint(i_server_block_device_mappings)
        for i_destination_server_block_device_mapping in i_server_block_device_mappings:
            i_new_volume_args = {'name': i_destination_server_block_device_mapping['destination']['volume_name'],
                                 'size': i_destination_server_block_device_mapping['destination']['volume_size'],
                                 'description': f"{i_destination_server_block_device_mapping['destination']['volume_description']}, g1-to-g2-migrated"}
            # TO BE REVISED: this seems to be the only way to create a bootable volume using openstacksdk
            if i_destination_server_block_device_mapping['destination']['volume_bootable']:
                i_new_volume_args['imageRef'] = destination_image.id

            i_new_volume = destination_project_conn.block_storage.create_volume(**i_new_volume_args)
            log_or_assert(args, f"F.29 Destination OpenStack volume created (name:{i_new_volume.name}, id:{i_new_volume.id})", i_new_volume)
            log_or_assert(args, f"F.30 Destination OpenStack volume available (name:{i_new_volume.name}, id:{i_new_volume.id})",
                          wait_for_ostack_volume_status(destination_project_conn, i_new_volume.id, 'available') == 'available')

            # remember volume ID
            i_destination_server_block_device_mapping['destination']['volume_id'] = i_new_volume.id

        for i_destination_server_block_device_mapping in i_server_block_device_mappings:
            log_or_assert(args, f"F.31 Destination OpenStack volume IDs properly stored", i_destination_server_block_device_mapping['destination']['volume_id'])

        # VM stop, wait for SHUTOFF
        if i_source_server_detail.status != 'SHUTOFF':
            source_project_conn.compute.stop_server(i_source_server_detail)
            log_or_assert(args, f"F.33 Source OpenStack VM server stopped",
                          wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'SHUTOFF') == "SHUTOFF")

        # volume migration (browse i_server_block_device_mappings)
        for i_server_block_device_mapping in i_server_block_device_mappings:
            ## G1: detect existing G1 RBD image
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
            i_source_server_rbd_images, i_stderr, i_ecode = ceph_rbd_image_exists(args,
                                                                                  i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                                  i_server_block_device_mapping['source']['ceph_rbd_image_name'])
            log_or_assert(args, f"F.41 Source OpenStack VM RBD image exists - query succeeded", i_ecode == 0, locals())
            log_or_assert(args, f"F.41 Source OpenStack VM RBD image exists - single image returned",
                          i_source_server_rbd_images and len(i_source_server_rbd_images) == 1, locals())
            i_source_server_rbd_image = i_source_server_rbd_images[0]


            ## G2: find volume
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
            i_destination_server_rbd_images, i_stderr, i_ecode = ceph_rbd_image_exists(args,
                                                                                       i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                                       i_server_block_device_mapping['destination']['volume_id'])
            log_or_assert(args, f"F.42 Destination OpenStack VM RBD image exists - query succeeded", i_ecode == 0, locals())
            log_or_assert(args, f"F.42 Destination OpenStack VM RBD image exists - single image returned",
                          i_destination_server_rbd_images and len(i_destination_server_rbd_images) == 1, locals())
            i_destination_server_rbd_image = i_destination_server_rbd_images[0]

            ## G1: create RBD image protected snapshot
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
            i_source_rbd_image_snapshot_name = f"g1-g2-migration-{i_source_server_rbd_image}"
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_snapshot_exists(args,
                                                                         i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                         i_source_server_rbd_image,
                                                                         i_source_rbd_image_snapshot_name)
            log_or_assert(args, f"F.43 Source OpenStack VM RBD image has non-colliding snapshot", i_ecode != 0, locals())

            i_stdout, i_stderr, i_ecode = ceph_rbd_image_snapshot_create(args,
                                                                         i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                         i_source_server_rbd_image,
                                                                         i_source_rbd_image_snapshot_name)
            log_or_assert(args, f"F.44 Source OpenStack VM RBD image snapshot created", i_ecode == 0, locals())


            i_stdout, i_stderr, i_ecode = ceph_rbd_image_snapshot_exists(args,
                                                                         i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                         i_source_server_rbd_image,
                                                                         i_source_rbd_image_snapshot_name)
            log_or_assert(args, f"F.45 Source OpenStack VM RBD image snapshot exists", i_ecode == 0, locals())

            ## G2: delete RBD image
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
            ## G2: confirm volume is deleted
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_delete(args,
                                                                i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                i_destination_server_rbd_image)
            log_or_assert(args, f"F.46 Destination OpenStack VM RBD image deletion succeeded", i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_exists(args,
                                                                i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                i_destination_server_rbd_image)
            log_or_assert(args, f"F.47 Destination OpenStack VM RBD image does not exist", i_ecode != 0, locals())


            ## G1: clone from snapshot
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            i_source_rbd_cloned_image_name = f"g1-g2-migration-{i_source_server_rbd_image}"
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_clone(args,
                                                               i_server_block_device_mapping['source']['ceph_pool_name'],
                                                               i_source_server_rbd_image,
                                                               i_source_rbd_image_snapshot_name,
                                                               i_server_block_device_mapping['source']['ceph_pool_name'],
                                                               i_source_rbd_cloned_image_name)
            log_or_assert(args, f"F.48 Source OpenStack VM RBD image cloned succesfully", i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_exists(args,
                                                                i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                i_source_rbd_cloned_image_name)
            log_or_assert(args, f"F.49 Source OpenStack VM cloned RBD image exists", i_ecode == 0, locals())

            ## G1: flatten cloned RBD image
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_flatten(args,
                                                                 i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                 i_source_rbd_cloned_image_name)
            log_or_assert(args, f"F.50 Source OpenStack VM cloned RBD image flatten successfully", i_ecode == 0, locals())

            ## G1->G2: copy RBD image to target pool
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
            #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_copy(args,
                                                              i_server_block_device_mapping['source']['ceph_pool_name'],
                                                              i_source_rbd_cloned_image_name,
                                                              i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                              i_destination_server_rbd_image)
            log_or_assert(args, f"F.51 Source OpenStack VM RBD image copied G1 -> G2 succesfully", i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_exists(args,
                                                                i_server_block_device_mapping['destination']['ceph_pool_name'],
                                                                i_destination_server_rbd_image)
            log_or_assert(args, f"F.52 Destination OpenStack VM RBD image exists", i_ecode == 0, locals())

            ## G1: delete cloned RBD image
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_delete(args,
                                                                i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                i_source_rbd_cloned_image_name)
            log_or_assert(args, f"F.53 Source OpenStack VM RBD cloned image deletion succeeded", i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_exists(args,
                                                                i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                i_source_rbd_cloned_image_name)
            log_or_assert(args, f"F.54 Source OpenStack VM cloned RBD image does not exist anymore", i_ecode != 0, locals())

            ## G1: remove created snapshot
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
            #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_snapshot_exists(args,
                                                                         i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                         i_source_server_rbd_image,
                                                                         i_source_rbd_image_snapshot_name)
            log_or_assert(args, f"F.55 Source OpenStack VM RBD image snapshot still exists", i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_snapshot_delete(args,
                                                                         i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                         i_source_server_rbd_image,
                                                                         i_source_rbd_image_snapshot_name)
            log_or_assert(args, f"F.56 Source OpenStack VM RBD image snapshot deletion succeeeded", i_ecode == 0, locals())
            i_stdout, i_stderr, i_ecode = ceph_rbd_image_snapshot_exists(args,
                                                                         i_server_block_device_mapping['source']['ceph_pool_name'],
                                                                         i_source_server_rbd_image,
                                                                         i_source_rbd_image_snapshot_name)
            log_or_assert(args, f"F.57 Source OpenStack VM RBD image snapshot does not exist anymore", i_ecode != 0, locals())

        # start the server in the source cloud again and wait until it is back ACTIVE
        if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status:
            if i_source_server_detail.status == 'ACTIVE':
                source_project_conn.compute.start_server(i_source_server_detail)
                log_or_assert(args, f"F.49 Source OpenStack VM server started back",
                              wait_for_ostack_server_status(source_project_conn, i_source_server.id, 'ACTIVE') == "ACTIVE",
                              locals())
        i_destination_server_flavor = destination_project_conn.compute.find_flavor(i_destination_server_flavor_name)
        i_destination_server_args = {'name': i_source_server_detail.name,
                                     'flavorRef': i_destination_server_flavor.id,
                                     'block_device_mapping_v2': [ {'source_type': 'volume',
                                                                   'destination_type': 'volume',
                                                                   'uuid': i_server_block_device_mapping['destination']['volume_id'],
                                                                   'device_name': i_server_block_device_mapping['destination']['device_name'],
                                                                   'boot_index': 0 if i_server_block_device_mapping['destination']['volume_bootable'] else None}
                                                                   for i_server_block_device_mapping in i_server_block_device_mappings ],
                                     'boot_volume': i_server_block_device_mappings[0]['destination']['volume_id'],
                                     'key_name': i_destination_server_keypair["name"],
                                     'networks': [ {'uuid': i_network.id} for i_network in i_destination_server_networks ]}
        log_or_assert(args, f"F.60 Destination OpenStack server arguments are generated with valid block-device-mapping",
                      i_destination_server_args['block_device_mapping_v2'], locals())
        log_or_assert(args, f"F.60 Destination OpenStack server arguments are generated with valid network configuration",
                      i_destination_server_args['networks'], locals())

        pprint.pprint(i_destination_server_args)
        i_destination_server = destination_project_conn.compute.create_server(**i_destination_server_args)
        log_or_assert(args, f"F.61 Destination OpenStack server is created", i_destination_server, locals())
        i_destination_server = destination_project_conn.compute.wait_for_server(i_destination_server)
        log_or_assert(args, f"F.62 Destination OpenStack server got ACTIVE",
                      i_destination_server.status == 'ACTIVE', locals())

        # add security groups to the destination server (if missing)
        for i_destination_server_security_group_id, i_destination_server_security_group_name in set([(i_destination_server_security_group.id, i_destination_server_security_group.name) for i_destination_server_security_group in i_destination_server_security_groups]):
            if {'name': i_destination_server_security_group_name } not in i_destination_server.security_groups:
                destination_project_conn.add_server_security_groups(i_destination_server.id, i_destination_server_security_group_id)

        args.logger.info(f"F.64 Source OpenStack server name:{i_source_server_detail.name} migrated into destination one name:{i_destination_server.name} id:{i_destination_server.id}")


# main() call (argument parsing)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    AP = argparse.ArgumentParser(epilog=globals().get('__doc__'),
                                 formatter_class=argparse.RawDescriptionHelpFormatter)
    AP.add_argument('--source-openrc', default=None, type=argparse.FileType('r'),
                    required=True, help='Source cloud authentication (OpenRC file)')
    AP.add_argument('--destination-openrc', default=None, type=argparse.FileType('r'),
                    required=True, help='Destination cloud authentication (OpenRC file)')
    AP.add_argument('--ceph-migrator-host', default='controller-ostack.stage.cloud.muni.cz',
                    help='OpenStack migrator ceph node host')
    AP.add_argument('--ceph-migrator-user', default='root',
                    help='OpenStack migrator ceph node username')
    AP.add_argument('--ceph-migrator-sshkeyfile', default=None, type=argparse.FileType('r'),
                    help='OpenStack migrator SSH keyfile')
    AP.add_argument('--source-ceph-cinder-pool-name', default='prod-cinder-volumes',
                    help='Source OpenStack/ceph cloud Cinder pool name')
    AP.add_argument('--source-ceph-ephemeral-pool-name', default='prod-ephemeral-vms',
                    help='Source OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name')
    AP.add_argument('--destination-ceph-cinder-pool-name', default='cloud-cinder-volumes-prod-brno',
                    help='Destination OpenStack/ceph cloud Cinder pool name')
    AP.add_argument('--destination-ceph-ephemeral-pool-name', default='cloud-ephemeral-volumes-prod-brno',
                    help='Destination OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name')
    AP.add_argument('--source-keypair-xml-dump-file', default='/root/migrator/prod-nova_api_key_pairs.dump.xml',
                    help='Source OpenStack cloud keypair SQL/XML dump file name')
    AP.add_argument('--destination-bootable-volume-image-name', default='cirros-0-x86_64',
                    help='Destination cloud bootable volumes are made on top of public image. Name of destination cloud image.')

    AP.add_argument('--project-name', default=None, required=True,
                    help='OpenStack project name (identical name in both clouds required)')
    AP.add_argument('--entity-overwrite', default=False, action='store_true',
                    help='Instruct migrator to overwrite already existing entities in destination cloud, TODO')
    AP.add_argument('--explicit-server-names', default=None, required=False,
                    help='(Optional) List of explicit server names or IDs to be migrated. Delimiter comma or space.')

    AP.add_argument('--validation-a-source-server-id', default=None, required=True,
                    help='For validation any server ID from source OpenStack project')

    AP.add_argument('--exception-trace-file', default="project-migrator.dump",
                    required=False,
                    help='Exception / assert dump state file')

    logging.basicConfig(level=logging.INFO,  # Set the logging level
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ARGS = AP.parse_args()
    ARGS.logger = logging.getLogger("project-migrator")
    ARGS.explicit_server_names = normalize_servers(ARGS.explicit_server_names)
    sys.exit(main(ARGS))