diff --git a/ci/lib.py b/ci/lib.py
index 3ac628e768c8890915b39ad3d6318c6df12ec1ba..f2ac7fecac29ff5f9bcd4f1dc6aacccb577f4e1c 100644
--- a/ci/lib.py
+++ b/ci/lib.py
@@ -1,13 +1,11 @@
 """ OpenStack project migrator library """
 
-import copy
 import re
 import pprint
 import time
 import os
 import os.path
 
-import xmltodict
 import paramiko
 import openstack
 from keystoneauth1.identity import v3
@@ -74,70 +72,6 @@ def get_destination_router(source_router):
     return None
 
 
-def get_destination_flavor(source_flavor):
-    """ LUT for flavors """
-    flavor_mapping = {
-        #'eph.16cores-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
-        #'eph.8cores-30ram': 'c2.8core-30ram' # nemusime resit neni pouzit u zadneho projektu v g1
-        #'eph.8cores-60ram': 'c3.8core-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
-        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram', # nesedi velikost disku v G2 je 80 misto 35
-        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram', # ok
-        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram', # nesedi velikost disku v G2 je 80 misto 35
-        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram', # ok
-        #'hdn.medium-ssd-ephem': # nemusime resit neni pouzit u zadneho projektu v g1
-        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500', # neni v G2 a je potreba
-        'hpc.16core-128ram': 'c3.16core-128ram', # neni v G2 a je potreba
-        'hpc.16core-256ram': 'c3.16core-256ram', # neni v G2 a je potreba
-        'hpc.16core-32ram': 'c2.16core-30ram', # ok
-        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk', # neni v G2 a je potreba
-        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd', # neni v G2 a je potreba
-        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram', # ok
-        'hpc.18core-48ram': '', # neni v G2 a je potreba
-        'hpc.18core-64ram-dukan': 'c2.24core-60ram', # nemusime resit
-        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd', # nemusime resit
-        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500', # neni v G2 a je potreba
-        'hpc.30core-256ram': 'c3.30core-256ram', # neni v G2 a je potreba
-        'hpc.30core-64ram': 'c3.32core-60ram', # v G2 je o 2 CPU vic
-        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram', # ok
-        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', #  ok
-        'hpc.4core-4ram': 'e1.medium', # nemusime resit
-        'hpc.8core-128ram': 'c3.8core-128ram', # neni v G2 a je potreba
-        'hpc.8core-16ram': 'c2.8core-16ram', # ok
-        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram', # nemusime resit
-        'hpc.8core-256ram': None, # nemusime resit
-        'hpc.8core-32ram-dukan': 'c2.8core-30ram', # nemusime resit
-        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram', # ok
-        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram', # ok
-        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram', # ok
-        'hpc.8core-8ram': 'e1.1xlarge', # v G2 je o 20 GB mensi disk
-        'hpc.hdh-ephem': 'hpc.hdh', # neni a je potreba
-        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500', # neni potreba
-        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # neni potreba
-        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # neni potreba
-        'hpc.large': 'g2.3xlarge', # ok
-        'hpc.medium': 'c2.8core-30ram', # ok
-        'hpc.small': 'c2.4core-16ram', # ok
-        'hpc.xlarge': None, # neni v G2
-        'hpc.xlarge-memory': 'c3.xlarge-memory', # neni v G2
-        'standard.16core-32ram': 'g2.2xlarge', # ok
-        'standard.20core-128ram': 'e1.20core-128ram', # neni potreba
-        'standard.20core-256ram': 'e1.20core-256ram', # neni v G2
-        'standard.2core-16ram': 'c3.2core-16ram', # ok
-        'standard.large': 'e1.large', # ok pripadne jeste c3.4core-8ram
-        'standard.medium': 'e1.medium', # o 2 vice CPU
-        'standard.memory': 'c3.2core-30ram', # pripadne i c2.2core-30ram
-        'standard.one-to-many': 'c3.24core-60ram', # v G2 je o 4 vice CPU
-        'standard.small': 'e1.small', # 2x vice ram a CPU u G2
-        'standard.tiny': 'e1.tiny', # 2x vice ram a CPU u G2
-        'standard.xlarge': 'e1.2xlarge', # o 4 vice CPU G2
-        'standard.xlarge-cpu': 'e1.2xlarge', # ok
-        'standard.xxlarge': 'c2.8core-30ram', # ok
-        'standard.xxxlarge': 'c3.8core-60ram'  # ok
-    }
-    assert source_flavor in flavor_mapping, "Source flavor can be mapped to destination one"
-    assert flavor_mapping[source_flavor], "Source flavor mapping is not valid"
-    return flavor_mapping[source_flavor]
-
 def normalize_table_data_field(data_field):
     """ normalize single data field (single data insert) """
     int_dict = {}
@@ -231,9 +165,6 @@ def get_ostack_project_servers(ostack_connection, project=None):
 def get_ostack_project_volumes(ostack_connection, project=None):
     return ostack_connection.block_store.volumes()
 
-def get_ostack_project_flavors(ostack_connection, project=None):
-    return tuple(ostack_connection.compute.flavors())
-
 def get_resource_details(resources):
     """ inspect resources """
     for i_resource in resources:
@@ -272,81 +203,7 @@ def assert_entity_ownership(entities, project):
     for i_entity in entities:
         assert i_entity.project_id == project.id, f"Entity belongs to expected project (id: {project.id})"
 
-def get_source_keypairs(args):
-    """ """
-    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host,
-                                                              args.ceph_migrator_user,
-                                                              args.ceph_migrator_sshkeyfile.name,
-                                                              f"cat {args.source_keypair_xml_dump_file}")
-    assert reply_ecode == 0, "Keypairs received"
-    table_dictdata = xmltodict.parse(reply_stdout)
-    table_data_dictdata = table_dictdata['mysqldump']['database']['table_data']['row']
-    return normalize_table_data(table_data_dictdata)
-
-def get_source_keypair(keypairs, keypair_name, user_id):
-    """ """
-    keypairs_selected = [ i_keypair for i_keypair in keypairs if i_keypair.get("name", "") == keypair_name and i_keypair.get("user_id", "") == user_id ]
-    if keypairs_selected:
-        return keypairs_selected[0]
-    return None
-
-def create_keypair(args, ostack_connection, keypair):
-    """ create openstack keypair object """
-    return ostack_connection.compute.create_keypair(name=get_dst_resource_name(args, keypair['name']),
-                                                    public_key=keypair['public_key'], type=keypair['type'])
-
-def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_group, dst_project, recursion_stack=None):
-    """ create openstack security group[s] """
-    int_recursion_stack = {} if recursion_stack is None else recursion_stack
-    int_sg = dst_ostack_conn.network.create_security_group(name=get_dst_resource_name(args, src_security_group.name),
-                                                           description=get_dst_resource_desc(args,
-                                                                                             src_security_group.description,
-                                                                                             src_security_group.id),
-                                                           project_id=dst_project.id)
-    int_recursion_stack[src_security_group.id] = int_sg.id
-
-    for i_rule in src_security_group.security_group_rules:
-        # browse security group rules
-        i_mod_rule = trim_dict(i_rule, denied_keys=['id', 'project_id', 'tenant_id', 'revision_number', 'updated_at', 'created_at', 'tags', 'standard_attr_id', 'normalized_cidr'])
-        i_mod_rule['security_group_id'] = int_sg.id
-        i_mod_rule['project_id'] = dst_project.id
-        i_mod_rule = {i_k: i_mod_rule[i_k] for i_k in i_mod_rule if i_mod_rule[i_k] is not None}
-        if i_mod_rule.get('remote_group_id') is not None:
-            if i_mod_rule['remote_group_id'] in int_recursion_stack:
-                # keep reference to itself or known (already created) SGs
-                i_mod_rule['remote_group_id'] = int_recursion_stack[i_mod_rule['remote_group_id']]
-            # get linked source SG
-            elif _src_sg := src_ostack_conn.network.find_security_group(i_mod_rule['remote_group_id']):
-                if _dst_sg := dst_ostack_conn.network.find_security_group(get_dst_resource_name(args, _src_sg.name),
-                                                                          project_id=dst_project.id):
-                    i_mod_rule['remote_group_id'] = _dst_sg.id
-                else:
-                    int_linked_sg = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
-                                                            _src_sg, dst_project,
-                                                            copy.deepcopy(int_recursion_stack))
-                    i_mod_rule['remote_group_id'] = int_linked_sg.id
-        try:
-            dst_ostack_conn.network.create_security_group_rule(**i_mod_rule)
-        except openstack.exceptions.ConflictException as ex:
-            pass
-
-    return int_sg
-
-def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project):
-    """ duplicate all projects's openstack security group[s] """
-
-    src_project_security_groups = tuple(src_ostack_conn.network.security_groups(project_id=src_project.id))
-
-    for i_src_security_group in src_project_security_groups:
-        j_dst_security_group_found = False
-        for j_dst_security_group in tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id)):
-            if get_dst_resource_name(args, i_src_security_group.name) == j_dst_security_group.name and \
-               i_src_security_group.id in j_dst_security_group.description:
-                j_dst_security_group_found = True
-        if not j_dst_security_group_found:
-            create_security_groups(args, src_ostack_conn, dst_ostack_conn, i_src_security_group, dst_project)
 
-    return src_project_security_groups, tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id))
 
 
 def log_or_assert(args, msg, condition, trace_details=None):
diff --git a/ci/olib.py b/ci/olib.py
index 4504afbc3e91191761fbe37ca7e7773c81573a12..89bb2433fc39d38aa7efce8b4eb9047efdf06df6 100644
--- a/ci/olib.py
+++ b/ci/olib.py
@@ -1,6 +1,11 @@
 """ OpenStack migrator - OpenStack library """
 
-from lib import log_or_assert, get_dst_resource_name, get_dst_resource_desc
+import copy
+
+import xmltodict
+import openstack
+
+from lib import log_or_assert, get_dst_resource_name, get_dst_resource_desc, remote_cmd_exec, normalize_table_data, trim_dict
 
 def get_destination_network(source_network):
     """ LUT for networks """
@@ -29,6 +34,71 @@ def get_destination_network(source_network):
         return network_mapping[source_network]
     return None
 
+def get_destination_flavor(source_flavor):
+    """ LUT for flavors """
+    flavor_mapping = {
+        #'eph.16cores-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
+        #'eph.8cores-30ram': 'c2.8core-30ram' # nemusime resit neni pouzit u zadneho projektu v g1
+        #'eph.8cores-60ram': 'c3.8core-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
+        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram', # nesedi velikost disku v G2 je 80 misto 35
+        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram', # ok
+        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram', # nesedi velikost disku v G2 je 80 misto 35
+        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram', # ok
+        #'hdn.medium-ssd-ephem': # nemusime resit neni pouzit u zadneho projektu v g1
+        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500', # neni v G2 a je potreba
+        'hpc.16core-128ram': 'c3.16core-128ram', # neni v G2 a je potreba
+        'hpc.16core-256ram': 'c3.16core-256ram', # neni v G2 a je potreba
+        'hpc.16core-32ram': 'c2.16core-30ram', # ok
+        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk', # neni v G2 a je potreba
+        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd', # neni v G2 a je potreba
+        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram', # ok
+        'hpc.18core-48ram': '', # neni v G2 a je potreba
+        'hpc.18core-64ram-dukan': 'c2.24core-60ram', # nemusime resit
+        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd', # nemusime resit
+        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500', # neni v G2 a je potreba
+        'hpc.30core-256ram': 'c3.30core-256ram', # neni v G2 a je potreba
+        'hpc.30core-64ram': 'c3.32core-60ram', # v G2 je o 2 CPU vic
+        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram', # ok
+        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', #  ok
+        'hpc.4core-4ram': 'e1.medium', # nemusime resit
+        'hpc.8core-128ram': 'c3.8core-128ram', # neni v G2 a je potreba
+        'hpc.8core-16ram': 'c2.8core-16ram', # ok
+        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram', # nemusime resit
+        'hpc.8core-256ram': None, # nemusime resit
+        'hpc.8core-32ram-dukan': 'c2.8core-30ram', # nemusime resit
+        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram', # ok
+        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram', # ok
+        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram', # ok
+        'hpc.8core-8ram': 'e1.1xlarge', # v G2 je o 20 GB mensi disk
+        'hpc.hdh-ephem': 'hpc.hdh', # neni a je potreba
+        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500', # neni potreba
+        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # neni potreba
+        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # neni potreba
+        'hpc.large': 'g2.3xlarge', # ok
+        'hpc.medium': 'c2.8core-30ram', # ok
+        'hpc.small': 'c2.4core-16ram', # ok
+        'hpc.xlarge': None, # neni v G2
+        'hpc.xlarge-memory': 'c3.xlarge-memory', # neni v G2
+        'standard.16core-32ram': 'g2.2xlarge', # ok
+        'standard.20core-128ram': 'e1.20core-128ram', # neni potreba
+        'standard.20core-256ram': 'e1.20core-256ram', # neni v G2
+        'standard.2core-16ram': 'c3.2core-16ram', # ok
+        'standard.large': 'e1.large', # ok pripadne jeste c3.4core-8ram
+        'standard.medium': 'e1.medium', # o 2 vice CPU
+        'standard.memory': 'c3.2core-30ram', # pripadne i c2.2core-30ram
+        'standard.one-to-many': 'c3.24core-60ram', # v G2 je o 4 vice CPU
+        'standard.small': 'e1.small', # 2x vice ram a CPU u G2
+        'standard.tiny': 'e1.tiny', # 2x vice ram a CPU u G2
+        'standard.xlarge': 'e1.2xlarge', # o 4 vice CPU G2
+        'standard.xlarge-cpu': 'e1.2xlarge', # ok
+        'standard.xxlarge': 'c2.8core-30ram', # ok
+        'standard.xxxlarge': 'c3.8core-60ram'  # ok
+    }
+    assert source_flavor in flavor_mapping, "Source flavor can be mapped to destination one"
+    assert flavor_mapping[source_flavor], "Source flavor mapping is not valid"
+    return flavor_mapping[source_flavor]
+
+
 def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project, src_network_name):
     """ Create matching OpenStack networking (network, subnet, router) """
     # read source network details
@@ -105,10 +175,10 @@ def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_pr
 def get_or_create_dst_server_networking(args,
                                         source_project_conn, destination_project_conn,
                                         source_project, destination_project,
-                                        source_server_detail):
+                                        source_server):
     """ assure created server networking (get or create) """
     server_network_addresses = []
-    for i_source_network_name, i_source_network_addresses in source_server_detail.addresses.items():
+    for i_source_network_name, i_source_network_addresses in source_server.addresses.items():
         i_destination_network_name = get_destination_network(i_source_network_name)
 
         if not i_destination_network_name:
@@ -125,3 +195,156 @@ def get_or_create_dst_server_networking(args,
                                          'src-network-addresses': {'network-name': i_source_network_name,
                                                                    'addresses': i_source_network_addresses}})
     return server_network_addresses
+
+
+def get_dst_server_flavor(args, src_server, dst_ostack_conn):
+    """ translate and return destination server flavor object """
+    source_server_flavor_name = src_server.flavor.name
+    destination_server_flavor_name = get_destination_flavor(source_server_flavor_name)
+
+    log_or_assert(args,
+                  f"F.5 Source to Destination flavor mapping succeeded ({source_server_flavor_name}->{destination_server_flavor_name})",
+                  destination_server_flavor_name)
+    destination_server_flavor = dst_ostack_conn.compute.find_flavor(destination_server_flavor_name)
+    log_or_assert(args,
+                  "F.6 Destination OpenStack flavor exists",
+                  destination_server_flavor)
+
+    return destination_server_flavor
+
+
+
+def get_source_keypairs(args):
+    """ receive source openstack keypairs from ceph migrator host as xml formatted sql dump """
+    reply_stdout, _, reply_ecode = remote_cmd_exec(args.ceph_migrator_host,
+                                                   args.ceph_migrator_user,
+                                                   args.ceph_migrator_sshkeyfile.name,
+                                                   f"cat {args.source_keypair_xml_dump_file}")
+    assert reply_ecode == 0, "Keypairs received"
+    table_dictdata = xmltodict.parse(reply_stdout)
+    table_data_dictdata = table_dictdata['mysqldump']['database']['table_data']['row']
+    return normalize_table_data(table_data_dictdata)
+
+def get_source_keypair(keypairs, keypair_name, user_id):
+    """ get specific source cloud keypair from source keypairs """
+    keypairs_selected = [ i_keypair for i_keypair in keypairs if i_keypair.get("name", "") == keypair_name and i_keypair.get("user_id", "") == user_id ]
+    if keypairs_selected:
+        return keypairs_selected[0]
+    return None
+
+def create_keypair(args, ostack_connection, keypair):
+    """ create openstack keypair object """
+    return ostack_connection.compute.create_keypair(name=get_dst_resource_name(args, keypair['name']),
+                                                    public_key=keypair['public_key'], type=keypair['type'])
+
+def get_or_create_dst_server_keypair(args, source_keypairs, src_server, dst_ostack_conn):
+    """ assure destination cloud keypair exists """
+    source_server_keypair = get_source_keypair(source_keypairs,
+                                               src_server.key_name,
+                                               src_server.user_id)
+    log_or_assert(args,
+                  f"F.7 Source OpenStack server keypair found ({source_server_keypair['name']})",
+                  source_server_keypair)
+
+    destination_server_keypair = None
+    if destination_server_keypairs := [i_keypair for i_keypair in dst_ostack_conn.list_keypairs()
+                                            if i_keypair.name == get_dst_resource_name(args,
+                                                                                       src_server.key_name)]:
+        destination_server_keypair = destination_server_keypairs[0]
+        log_or_assert(args,
+                      f"F.8 Destination OpenStack server keypair found already ({destination_server_keypair.name})",
+                      destination_server_keypair)
+    else:
+        destination_server_keypair = create_keypair(args,
+                                                    dst_ostack_conn,
+                                                    source_server_keypair)
+        args.logger.info("F.8 Destination OpenStack server keypair created")
+    log_or_assert(args,
+                  f"F.9 Destination OpenStack server keypair exists ({destination_server_keypair.name})",
+                  destination_server_keypair)
+    return destination_server_keypair
+
+
+def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_group, dst_project, recursion_stack=None):
+    """ create openstack security group[s] """
+    int_recursion_stack = {} if recursion_stack is None else recursion_stack
+    int_sg = dst_ostack_conn.network.create_security_group(name=get_dst_resource_name(args, src_security_group.name),
+                                                           description=get_dst_resource_desc(args,
+                                                                                             src_security_group.description,
+                                                                                             src_security_group.id),
+                                                           project_id=dst_project.id)
+    int_recursion_stack[src_security_group.id] = int_sg.id
+
+    for i_rule in src_security_group.security_group_rules:
+        # browse security group rules
+        i_mod_rule = trim_dict(i_rule, denied_keys=['id', 'project_id', 'tenant_id', 'revision_number', 'updated_at', 'created_at', 'tags', 'standard_attr_id', 'normalized_cidr'])
+        i_mod_rule['security_group_id'] = int_sg.id
+        i_mod_rule['project_id'] = dst_project.id
+        i_mod_rule = {i_k: i_mod_rule[i_k] for i_k in i_mod_rule if i_mod_rule[i_k] is not None}
+        if i_mod_rule.get('remote_group_id') is not None:
+            if i_mod_rule['remote_group_id'] in int_recursion_stack:
+                # keep reference to itself or known (already created) SGs
+                i_mod_rule['remote_group_id'] = int_recursion_stack[i_mod_rule['remote_group_id']]
+            # get linked source SG
+            elif _src_sg := src_ostack_conn.network.find_security_group(i_mod_rule['remote_group_id']):
+                if _dst_sg := dst_ostack_conn.network.find_security_group(get_dst_resource_name(args, _src_sg.name),
+                                                                          project_id=dst_project.id):
+                    i_mod_rule['remote_group_id'] = _dst_sg.id
+                else:
+                    int_linked_sg = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
+                                                            _src_sg, dst_project,
+                                                            copy.deepcopy(int_recursion_stack))
+                    i_mod_rule['remote_group_id'] = int_linked_sg.id
+        try:
+            dst_ostack_conn.network.create_security_group_rule(**i_mod_rule)
+        except openstack.exceptions.ConflictException:
+            pass
+
+    return int_sg
+
+def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project):
+    """ duplicate all project's openstack security group[s] """
+
+    src_project_security_groups = tuple(src_ostack_conn.network.security_groups(project_id=src_project.id))
+
+    for i_src_security_group in src_project_security_groups:
+        j_dst_security_group_found = False
+        for j_dst_security_group in tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id)):
+            if get_dst_resource_name(args, i_src_security_group.name) == j_dst_security_group.name and \
+               i_src_security_group.id in j_dst_security_group.description:
+                j_dst_security_group_found = True
+        if not j_dst_security_group_found:
+            create_security_groups(args, src_ostack_conn, dst_ostack_conn, i_src_security_group, dst_project)
+
+    return src_project_security_groups, tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id))
+
+
+def get_or_create_dst_server_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project, src_server):
+    """ assure equivalent security groups are created in destination cloud """
+    dst_server_security_groups=[]
+    for i_src_server_security_group_name in {i_sg['name'] for i_sg in src_server.security_groups}:
+        i_src_server_security_group = src_ostack_conn.network.find_security_group(i_src_server_security_group_name,
+                                                                                  project_id=src_project.id)
+        i_dst_server_security_group = None
+        if i_dst_server_security_group := dst_ostack_conn.network.find_security_group(get_dst_resource_name(args,
+                                                                                                            i_src_server_security_group.name),
+                                                                                                            project_id=dst_project.id):
+            log_or_assert(args,
+                          f"F.10 Destination OpenStack server security group found already ({i_dst_server_security_group.name})",
+                          i_dst_server_security_group)
+        else:
+            args.logger.info("F.10 Destination OpenStack server matching security group not found and gets created.")
+            i_dst_server_security_group = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
+                                                                 i_src_server_security_group, dst_project)
+            log_or_assert(args,
+                          f"F.10 Destination OpenStack server security group created ({i_dst_server_security_group.name})",
+                          i_dst_server_security_group)
+
+        log_or_assert(args,
+                      f"F.11 Destination OpenStack server security group exists ({i_dst_server_security_group.name})",
+                      i_dst_server_security_group)
+        dst_server_security_groups.append(i_dst_server_security_group)
+    log_or_assert(args,
+                  "F.12 Destination OpenStack server - destination security groups exists",
+                  dst_server_security_groups)
+    return dst_server_security_groups
diff --git a/ci/project-migrator.py b/ci/project-migrator.py
index a512196b0592546bdee86bec7589651f0699b34f..14551b8158ed4d5b9981628fcf62c63ad02f1f49 100755
--- a/ci/project-migrator.py
+++ b/ci/project-migrator.py
@@ -45,19 +45,19 @@ def main(args):
     # connect to source cloud
     source_migrator_openrc = lib.get_openrc(args.source_openrc)
     source_migrator_conn = lib.get_ostack_connection(source_migrator_openrc)
-    args.logger.info("A.1 Source OpenStack cloud connected as migrator user")
+    args.logger.info("A.01 Source OpenStack cloud connected as migrator user")
 
     # connect to destination cloud
     destination_migrator_openrc = lib.get_openrc(args.destination_openrc)
     destination_migrator_conn = lib.get_ostack_connection(destination_migrator_openrc)
-    args.logger.info("A.2 Destination OpenStack cloud connected as migrator user")
+    args.logger.info("A.02 Destination OpenStack cloud connected as migrator user")
 
     # check project exists in source and destination
     source_project_name, destination_project_name = lib.get_ostack_project_names(args.project_name)
     source_project = lib.get_ostack_project(source_migrator_conn, source_project_name)
-    lib.log_or_assert(args, "B.1 Source OpenStack cloud project exists", source_project)
+    lib.log_or_assert(args, "B.01 Source OpenStack cloud project exists", source_project)
     source_project_type = lib.get_ostack_project_type(source_migrator_conn, source_project)
-    lib.log_or_assert(args, f"B.2 Source OpenStack cloud project type is {source_project_type}",
+    lib.log_or_assert(args, f"B.02 Source OpenStack cloud project type is {source_project_type}",
                   source_project_type)
 
     destination_project = lib.get_ostack_project(destination_migrator_conn, destination_project_name)
@@ -88,38 +88,33 @@ def main(args):
     # connect to migrator node
     reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
                                                                   args.ceph_migrator_sshkeyfile.name, 'uname -a')
-    lib.log_or_assert(args, "D.1 Migrator host is reachable", 'Linux' in reply_stdout and reply_ecode == 0)
+    lib.log_or_assert(args, "D.01 Migrator host is reachable", 'Linux' in reply_stdout and reply_ecode == 0)
 
     reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
                                                                   args.ceph_migrator_sshkeyfile.name,
                                                                   '/root/migrator/ceph-accessible.sh')
-    lib.log_or_assert(args, "D.2 Ceph is available from the migrator host", reply_ecode == 0)
+    lib.log_or_assert(args, "D.02 Ceph is available from the migrator host", reply_ecode == 0)
 
     source_rbd_images = {args.source_ceph_ephemeral_pool_name: None,
                          args.source_ceph_cinder_pool_name: None}
     for i_pool_name in source_rbd_images.keys():
         source_rbd_images[i_pool_name] = clib.ceph_rbd_images_list(args, i_pool_name)
-        lib.log_or_assert(args, f"D.3 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name])
+        lib.log_or_assert(args, f"D.03 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name])
 
-    source_keypairs = lib.get_source_keypairs(args)
-    lib.log_or_assert(args, "D.4 Source OpenStack cloud keypairs received.", source_keypairs)
+    source_keypairs = olib.get_source_keypairs(args)
+    lib.log_or_assert(args, "D.04 Source OpenStack cloud keypairs received.", source_keypairs)
 
     # get source/destination entities in the project
     source_project_servers = lib.get_ostack_project_servers(source_project_conn, source_project)
-    args.logger.info("E.1 Source OpenStack cloud servers received")
+    args.logger.info("E.01 Source OpenStack cloud servers received")
     lib.assert_entity_ownership(source_project_servers, source_project)
-    args.logger.info(f"E.2 Source OpenStack cloud project has {len(source_project_servers)} servers.")
-    source_project_flavors = lib.get_ostack_project_flavors(source_project_conn)
-    lib.log_or_assert(args, "E.4 Source OpenStack flavor list received", source_project_flavors)
+    args.logger.info(f"E.02 Source OpenStack cloud project has {len(source_project_servers)} servers.")
 
     destination_project_servers = lib.get_ostack_project_servers(destination_project_conn, destination_project)
     args.logger.info("E.10 Destination OpenStack cloud servers received")
     lib.assert_entity_ownership(destination_project_servers, destination_project)
     args.logger.info(f"E.11 Destination OpenStack cloud project has {len(destination_project_servers)} servers.")
 
-    destination_project_flavors = lib.get_ostack_project_flavors(destination_project_conn)
-    lib.log_or_assert(args, "E.12 Destination OpenStack flavor list received", destination_project_flavors)
-
     lib.log_or_assert(args, "E.20 Source OpenStack VM ID validation succeeded",
                       args.validation_a_source_server_id in [i_server.id for i_server in source_project_servers])
 
@@ -129,30 +124,31 @@ def main(args):
     destination_fip_network = destination_project_conn.network.find_network(args.destination_ipv4_external_network)
     lib.log_or_assert(args, "E.31 Destination cloud FIP network detected", destination_fip_network)
 
-    lib.duplicate_ostack_project_security_groups(args, source_project_conn, destination_project_conn,
-                                                 source_project, destination_project)
+    olib.duplicate_ostack_project_security_groups(args,
+                                                  source_project_conn, destination_project_conn,
+                                                  source_project, destination_project)
     args.logger.info("E.40 Destination OpenStack project security groups duplicated")
 
-    args.logger.info("F.0 Main looping started")
-    args.logger.info(f"F.0 Source VM servers: {[ i_source_server.name for i_source_server in source_project_servers]}")
+    args.logger.info("F.00 Main looping started")
+    args.logger.info(f"F.00 Source VM servers: {[ i_source_server.name for i_source_server in source_project_servers]}")
     for i_source_server in source_project_servers:
         i_source_server_detail = source_project_conn.compute.find_server(i_source_server.id)
         i_source_server_has_fip = lib.server_detect_floating_address(i_source_server_detail)
 
         if args.explicit_server_names and i_source_server.name not in args.explicit_server_names:
-            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to --explicit-server-names={args.explicit_server_names}")
+            args.logger.info(f"F.01 server migration skipped - name:{i_source_server_detail.name} due to --explicit-server-names={args.explicit_server_names}")
             continue
 
         if i_source_server_detail.status != 'ACTIVE':
-            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to VM status {i_source_server_detail.status}. Use --migrate-also-inactive-servers if necessary.")
+            args.logger.info(f"F.01 server migration skipped - name:{i_source_server_detail.name} due to VM status {i_source_server_detail.status}. Use --migrate-also-inactive-servers if necessary.")
             continue
         # detect destination VM does not exist
         i_destination_server_detail = destination_project_conn.compute.find_server(lib.get_dst_resource_name(args, i_source_server_detail.name))
         if i_destination_server_detail:
-            args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} as equivalent VM exists in destination cloud (name: {i_destination_server_detail.name})")
+            args.logger.info(f"F.01 server migration skipped - name:{i_source_server_detail.name} as equivalent VM exists in destination cloud (name: {i_destination_server_detail.name})")
             continue
 
-        args.logger.info(f"F.1 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, " \
+        args.logger.info(f"F.01 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, " \
                          f"keypair: {i_source_server_detail.key_name}, flavor: {i_source_server_detail.flavor}, " \
                          f"sec-groups:{i_source_server_detail.security_groups}, root_device_name: {i_source_server_detail.root_device_name}, " \
                          f"block_device_mapping: {i_source_server_detail.block_device_mapping}, " \
@@ -166,52 +162,22 @@ def main(args):
                                                      source_project, destination_project,
                                                      i_source_server_detail)
 
-
         # flavor detection
-        i_source_server_flavor_name = i_source_server_detail.flavor.name
-        i_destination_server_flavor_name = lib.get_destination_flavor(i_source_server_flavor_name)
-
-        lib.log_or_assert(args, f"F.5 Source to Destination flavor mapping succeeeded ({i_source_server_flavor_name}->{i_destination_server_flavor_name})",
-                      i_destination_server_flavor_name)
-        lib.log_or_assert(args,
-                          "F.6 Destination OpenStack flavor exists",
-                          [ i_flavor for i_flavor in destination_project_flavors if i_flavor.name == i_destination_server_flavor_name ])
+        i_destination_server_flavor = olib.get_dst_server_flavor(args,
+                                                                 i_source_server_detail,
+                                                                 destination_project_conn)
 
         # keypair detection / creation
-        i_source_server_keypair = lib.get_source_keypair(source_keypairs, i_source_server_detail.key_name, i_source_server_detail.user_id)
-        lib.log_or_assert(args, f"F.7 Source OpenStack server keypair found ({i_source_server_keypair['name']})", i_source_server_keypair)
-
-        i_destination_server_keypair = None
-        if i_destination_server_keypairs := [i_keypair for i_keypair in destination_project_conn.list_keypairs()
-                                               if i_keypair.name == lib.get_dst_resource_name(args, i_source_server_detail.key_name)]:
-            i_destination_server_keypair = i_destination_server_keypairs[0]
-            lib.log_or_assert(args, f"F.8 Destination OpenStack server keypair found already ({i_destination_server_keypair.name})", i_destination_server_keypair)
-        else:
-            i_destination_server_keypair = lib.create_keypair(args, destination_project_conn, i_source_server_keypair)
-            args.logger.info("F.8 Destination OpenStack server keypair created")
-        lib.log_or_assert(args, f"F.9 Destination OpenStack server keypair exists ({i_destination_server_keypair.name})", i_destination_server_keypair)
-
-        # server security group
-        i_destination_server_security_groups=[]
-        for i_source_server_security_group_name in {i_sg['name'] for i_sg in i_source_server_detail.security_groups}:
-            i_source_server_security_group = source_project_conn.network.find_security_group(i_source_server_security_group_name, project_id=source_project.id)
-            i_destination_server_security_group = None
-            if i_destination_server_security_group := destination_project_conn.network.find_security_group(lib.get_dst_resource_name(args, i_source_server_security_group.name),
-                                                                                                           project_id=destination_project.id):
-                lib.log_or_assert(args, f"F.10 Destination OpenStack server security group found already ({i_destination_server_security_group.name})",
-                              i_destination_server_security_group)
-            else:
-                args.logger.info("F.10 Destination OpenStack server matching security group not found and gets created.")
-                i_destination_server_security_group = lib.create_security_groups(args, source_project_conn, destination_project_conn,
-                                                                                 i_source_server_security_group, destination_project)
-                lib.log_or_assert(args, f"F.10 Destination OpenStack server security group created ({i_destination_server_security_group.name})",
-                              i_destination_server_security_group)
-
-            lib.log_or_assert(args, f"F.11 Destination OpenStack server security group exists ({i_destination_server_security_group.name})",
-                          i_destination_server_security_group)
-            i_destination_server_security_groups.append(i_destination_server_security_group)
-        lib.log_or_assert(args, "F.12 Destination OpenStack server - destination security groups exists",
-                      i_destination_server_security_groups)
+        i_destination_server_keypair = olib.get_or_create_dst_server_keypair(args, source_keypairs,
+                                                                             i_source_server_detail,
+                                                                             destination_project_conn)
+
+        # get / create server security groups
+        i_destination_server_security_groups = \
+            olib.get_or_create_dst_server_security_groups(args,
+                                                          source_project_conn, destination_project_conn,
+                                                          source_project, destination_project,
+                                                          i_source_server_detail)
 
         # volume detection
         i_server_block_device_mappings = [ ]
@@ -332,7 +298,6 @@ def main(args):
 
 
         # start server in destination cloud
-        i_destination_server_flavor = destination_project_conn.compute.find_flavor(i_destination_server_flavor_name)
         # Note: argument network is not valid anymore, use networks
         i_destination_server_args = {'name': lib.get_dst_resource_name(args, i_source_server_detail.name),
                                      'flavorRef': i_destination_server_flavor.id,