diff --git a/ci/lib.py b/ci/lib.py
index 841b5ec9a3f174c6be14b345fa0deca4a1e9236f..c60a1fe4d71418aa66eac8eaac3df264acf1cacf 100644
--- a/ci/lib.py
+++ b/ci/lib.py
@@ -1,7 +1,6 @@
 """ OpenStack project migrator library """
 
 import copy
-import json
 import re
 import pprint
 import time
@@ -182,9 +181,15 @@ def normalize_table_data(data):
         int_list.append(normalize_table_data_field(i_data_field['field']))
     return int_list
 
-def get_migrated_resource_name(args, name):
+def get_dst_resource_name(args, name=""):
     """ translate original name to destination one """
-    return f"{args.destination_entity_prefix}{name}"
+    return f"{args.destination_entity_name_prefix}{name}"
+
+def get_dst_resource_desc(args, desc="", fields=None):
+    """ translate original description to destination one and fill in optional fields """
+    if '{}' in args.destination_entity_description_suffix and fields:
+        return f"{desc}{args.destination_entity_description_suffix.format(fields)}"
+    return f"{desc}{args.destination_entity_description_suffix}"
 
 def get_openrc(file_handle):
     """ parse and return OpenRC file """
@@ -314,14 +319,16 @@ def get_source_keypair(keypairs, keypair_name, user_id):
 
 def create_keypair(args, ostack_connection, keypair):
     """ create openstack keypair object """
-    return ostack_connection.compute.create_keypair(name=get_migrated_resource_name(args, keypair['name']),
+    return ostack_connection.compute.create_keypair(name=get_dst_resource_name(args, keypair['name']),
                                                     public_key=keypair['public_key'], type=keypair['type'])
 
 def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_group, dst_project, recursion_stack=None):
     """ create openstack security group[s] """
     int_recursion_stack = {} if recursion_stack is None else recursion_stack
-    int_sg = dst_ostack_conn.network.create_security_group(name=get_migrated_resource_name(args, src_security_group.name),
-                                                           description=f"{src_security_group.description}, g1-to-g2-migrated(g1-id:{src_security_group.id})",
+    int_sg = dst_ostack_conn.network.create_security_group(name=get_dst_resource_name(args, src_security_group.name),
+                                                           description=get_dst_resource_desc(args,
+                                                                                             src_security_group.description,
+                                                                                             src_security_group.id),
                                                            project_id=dst_project.id)
     int_recursion_stack[src_security_group.id] = int_sg.id
 
@@ -337,7 +344,7 @@ def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_
                 i_mod_rule['remote_group_id'] = int_recursion_stack[i_mod_rule['remote_group_id']]
             # get linked source SG
             elif _src_sg := src_ostack_conn.network.find_security_group(i_mod_rule['remote_group_id']):
-                if _dst_sg := dst_ostack_conn.network.find_security_group(get_migrated_resource_name(args, _src_sg.name),
+                if _dst_sg := dst_ostack_conn.network.find_security_group(get_dst_resource_name(args, _src_sg.name),
                                                                           project_id=dst_project.id):
                     i_mod_rule['remote_group_id'] = _dst_sg.id
                 else:
@@ -360,7 +367,7 @@ def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_c
     for i_src_security_group in src_project_security_groups:
         j_dst_security_group_found = False
         for j_dst_security_group in tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id)):
-            if get_migrated_resource_name(args, i_src_security_group.name) == j_dst_security_group.name and \
+            if get_dst_resource_name(args, i_src_security_group.name) == j_dst_security_group.name and \
                i_src_security_group.id in j_dst_security_group.description:
                 j_dst_security_group_found = True
         if not j_dst_security_group_found:
@@ -432,7 +439,7 @@ def get_server_block_device_mapping(args, server_volume_attachment, server_volum
                        'ceph_pool_name': args.source_ceph_cinder_pool_name,
                        'ceph_rbd_image_name': server_volume.id},
             'destination': {'volume_size': server_volume.size,
-                            'volume_name': get_migrated_resource_name(args, server_volume.name),
+                            'volume_name': get_dst_resource_name(args, server_volume.name),
                             'volume_description': server_volume.description,
                             'volume_id': None,
                             'ceph_pool_name': args.destination_ceph_cinder_pool_name,
@@ -459,23 +466,24 @@ def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_pr
     dst_ext_network = dst_ostack_conn.network.find_network(args.destination_ipv4_external_network)
 
     # create network
-    dst_network_name = get_migrated_resource_name(args, src_network_name)
+    dst_network_name = get_dst_resource_name(args, src_network_name)
     dst_network = dst_ostack_conn.network.find_network(dst_network_name,
                                                        project_id=dst_project.id)
     if not dst_network:
         dst_network = dst_ostack_conn.network.create_network(name=dst_network_name,
                                                              project_id=dst_project.id,
                                                              mtu=src_network.mtu,
-                                                             description=f"{src_network.description}, g1 migrated id:{src_network.id}",
+                                                             description=get_dst_resource_desc(args,
+                                                                                               src_network.description,
+                                                                                               src_network.id),
                                                              port_security_enabled=src_network.is_port_security_enabled)
 
     # create subnets
     dst_subnets = []
     subnet_mapping = {}
     for i_src_subnet in src_subnets:
-        i_dst_subnet_name = get_migrated_resource_name(args, i_src_subnet.name)
-        i_dst_subnet = dst_ostack_conn.network.find_subnet(get_migrated_resource_name(args, i_src_subnet.name),
-                                                           project_id=dst_project.id)
+        i_dst_subnet_name = get_dst_resource_name(args, i_src_subnet.name)
+        i_dst_subnet = dst_ostack_conn.network.find_subnet(i_dst_subnet_name, project_id=dst_project.id)
         if not i_dst_subnet:
             i_dst_subnet = dst_ostack_conn.network.create_subnet(network_id=dst_network.id,
                                                                  name=i_dst_subnet_name,
@@ -487,7 +495,9 @@ def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_pr
                                                                  gateway_ip=i_src_subnet.gateway_ip,
                                                                  host_routes=i_src_subnet.host_routes,
                                                                  dns_nameservers=i_src_subnet.dns_nameservers,
-                                                                 description=f"{i_src_subnet.description}, g1 migrated id:{i_src_subnet.id}")
+                                                                 description=get_dst_resource_desc(args,
+                                                                                                   i_src_subnet.description,
+                                                                                                   i_src_subnet.id))
         subnet_mapping[i_src_subnet.id] = i_dst_subnet.id
         dst_subnets.append(i_dst_subnet)
 
@@ -495,16 +505,18 @@ def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_pr
     dst_network_routers = []
     for i_src_network_router, i_src_network_router_subnets in src_network_routers_subnets:
 
-        i_dst_network_router_name = get_migrated_resource_name(args, i_src_network_router.name)
+        i_dst_network_router_name = get_dst_resource_name(args, i_src_network_router.name)
         i_dst_network_router = dst_ostack_conn.network.find_router(i_dst_network_router_name,
                                                                    project_id=dst_project.id)
         if not i_dst_network_router:
             i_dst_network_router = dst_ostack_conn.network.create_router(name=i_dst_network_router_name,
-                                                                         description=f"{i_src_network_router.description}, g1 migrated id:{i_src_network_router.id}",
+                                                                         description=get_dst_resource_desc(args,
+                                                                                                           i_src_network_router.description,
+                                                                                                           i_src_network_router.id),
                                                                          project_id=dst_project.id,
                                                                          external_gateway_info={"network_id": dst_ext_network.id})
             for i_src_network_router_subnet in i_src_network_router_subnets:
-                # TODO: Principally there may be also foreign subnets, find more general sulution
+                # TODO: Principally there may be also foreign subnets, find more general solution
                 if i_src_network_router_subnet in subnet_mapping:
                     dst_ostack_conn.add_router_interface(i_dst_network_router, subnet_id=subnet_mapping[i_src_network_router_subnet])
 
@@ -523,8 +535,9 @@ def describe_server_network_connection(args, dst_ostack_conn, netaddr_dict):
     source_server_fixed_addresses = [i_addr['addr'] for i_addr in netaddr_dict['src-network-addresses']['addresses'] if i_addr.get('OS-EXT-IPS:type') == 'fixed']
     if len(source_server_fixed_addresses) == 1 and len(dst_network.subnet_ids) == 1:
         try:
-            fixed_port = dst_ostack_conn.network.create_port(name=get_migrated_resource_name(args, "unknown"),
-                                                             description="2",
+            port_desc = "A part of workload migration: created to get same server fixed-ips"
+            fixed_port = dst_ostack_conn.network.create_port(name=get_dst_resource_name(args),
+                                                             description=port_desc,
                                                              network_id=dst_network.id,
                                                              fixed_ips=[{"ip_address": source_server_fixed_addresses[0],
                                                                          "subnet_id": dst_network.subnet_ids[0]}])
diff --git a/ci/project-migrator.py b/ci/project-migrator.py
index a2bffe299bffeb4543bdd36ee0250b66dc3fdf4a..e5b623e623bac01f5f34748e555b84daeb403dde 100755
--- a/ci/project-migrator.py
+++ b/ci/project-migrator.py
@@ -144,7 +144,7 @@ def main(args):
             args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} due to VM status {i_source_server_detail.status}. Use --migrate-also-inactive-servers if necessary.")
             continue
         # detect destination VM does not exist
-        i_destination_server_detail = destination_project_conn.compute.find_server(lib.get_migrated_resource_name(args, i_source_server_detail.name))
+        i_destination_server_detail = destination_project_conn.compute.find_server(lib.get_dst_resource_name(args, i_source_server_detail.name))
         if i_destination_server_detail:
             args.logger.info(f"F.1 server migration skipped - name:{i_source_server_detail.name} as equivalent VM exists in destination cloud (name: {i_destination_server_detail.name})")
             continue
@@ -190,7 +190,7 @@ def main(args):
 
         i_destination_server_keypair = None
         if i_destination_server_keypairs := [i_keypair for i_keypair in destination_project_conn.list_keypairs()
-                                               if i_keypair.name == lib.get_migrated_resource_name(args, i_source_server_detail.key_name)]:
+                                               if i_keypair.name == lib.get_dst_resource_name(args, i_source_server_detail.key_name)]:
             i_destination_server_keypair = i_destination_server_keypairs[0]
             lib.log_or_assert(args, f"F.8 Destination OpenStack server keypair found already ({i_destination_server_keypair.name})", i_destination_server_keypair)
         else:
@@ -203,7 +203,7 @@ def main(args):
         for i_source_server_security_group_name in {i_sg['name'] for i_sg in i_source_server_detail.security_groups}:
             i_source_server_security_group = source_project_conn.network.find_security_group(i_source_server_security_group_name, project_id=source_project.id)
             i_destination_server_security_group = None
-            if i_destination_server_security_group := destination_project_conn.network.find_security_group(lib.get_migrated_resource_name(args, i_source_server_security_group.name),
+            if i_destination_server_security_group := destination_project_conn.network.find_security_group(lib.get_dst_resource_name(args, i_source_server_security_group.name),
                                                                                                            project_id=destination_project.id):
                 lib.log_or_assert(args, f"F.10 Destination OpenStack server security group found already ({i_destination_server_security_group.name})",
                               i_destination_server_security_group)
@@ -273,7 +273,7 @@ def main(args):
                                                                   'ceph_rbd_image_name': i_source_ceph_ephemeral_rbd_image,
                                                                   'ceph_rbd_image_size': i_source_ceph_ephemeral_rbd_image_size},
                                                        'destination': {'volume_size': i_source_ceph_ephemeral_rbd_image_size,
-                                                                       'volume_name': lib.get_migrated_resource_name(args, i_source_ceph_ephemeral_rbd_image),
+                                                                       'volume_name': lib.get_dst_resource_name(args, i_source_ceph_ephemeral_rbd_image),
                                                                        'volume_description': f"RBD {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}",
                                                                        'volume_id': None,
                                                                        'ceph_pool_name': args.destination_ceph_cinder_pool_name,
@@ -296,9 +296,11 @@ def main(args):
         for i_destination_server_block_device_mapping in i_server_block_device_mappings:
             i_new_volume_args = {'name': i_destination_server_block_device_mapping['destination']['volume_name'],
                                  'size': i_destination_server_block_device_mapping['destination']['volume_size'],
-                                 'description': f"{i_destination_server_block_device_mapping['destination']['volume_description']}, " \
-                                                f"g1-to-g2-migrated(g1-id:{i_destination_server_block_device_mapping['source']['volume_id']})"}
-            # TO BE REVISED: this seems to be the only way how to create bootable volume using openstacksdk
+                                 'description': lib.get_dst_resource_desc(args,
+                                                                          i_destination_server_block_device_mapping['destination']['volume_description'],
+                                                                          i_destination_server_block_device_mapping['source']['volume_id'])}
+
+            # TODO: this seems to be the only way to create a bootable volume using openstacksdk, check again
             if i_destination_server_block_device_mapping['destination']['volume_bootable']:
                 i_new_volume_args['imageRef'] = destination_image.id
 
@@ -337,7 +339,7 @@ def main(args):
         # start server in destination cloud
         i_destination_server_flavor = destination_project_conn.compute.find_flavor(i_destination_server_flavor_name)
         # Note: argument network is not valid anymore, use networks
-        i_destination_server_args = {'name': lib.get_migrated_resource_name(args, i_source_server_detail.name),
+        i_destination_server_args = {'name': lib.get_dst_resource_name(args, i_source_server_detail.name),
                                      'flavorRef': i_destination_server_flavor.id,
                                      'block_device_mapping_v2': [ {'source_type': 'volume',
                                                                    'destination_type': 'volume',
@@ -392,9 +394,11 @@ def main(args):
                                 "Note in-use volumes are being migrated in VM server migration part.")
                 continue
 
-            i_dst_volume = destination_project_conn.block_storage.create_volume(name=lib.get_migrated_resource_name(args, i_source_volume.name),
+            i_dst_volume = destination_project_conn.block_storage.create_volume(name=lib.get_dst_resource_name(args, i_source_volume.name),
                                                                                 size=i_source_volume.size,
-                                                                                description=f"{i_source_volume.description}, g1-to-g2-migrated(g1-id:{i_source_volume.id})")
+                                                                                description=lib.get_dst_resource_desc(args,
+                                                                                                                      i_source_volume.description,
+                                                                                                                      i_source_volume.id))
             lib.log_or_assert(args,
                             f"H.3 Destination OpenStack volume created (name:{i_dst_volume.name}, id:{i_dst_volume.id})", i_dst_volume)
             i_dst_volume_status = lib.wait_for_ostack_volume_status(destination_project_conn, i_dst_volume.id, 'available')
@@ -445,8 +449,10 @@ if __name__ == "__main__":
                     help='Destination cloud bootable volumes are made on top of public image. Name of destination cloud image.')
     AP.add_argument('--destination-ipv4-external-network', default='external-ipv4-general-public',
                     help='Destination cloud IPV4 external network.')
-    AP.add_argument('--destination-entity-prefix', default='migrated-',
-                    help='Destination cloud migrated cloud entity names prefix.')
+    AP.add_argument('--destination-entity-name-prefix', default='migrated-',
+                    help='Destination cloud entity name prefix.')
+    AP.add_argument('--destination-entity-description-suffix', default=', migrated(id:{})',
+                    help='Destination cloud entity description suffix. An optional {} placeholder is filled with the source entity ID.')
 
     AP.add_argument('--project-name', default=None, required=True,
                     help='OpenStack project name (identical name in both clouds required)')
@@ -463,15 +469,18 @@ if __name__ == "__main__":
     AP.add_argument('--exception-trace-file', default="project-migrator.dump",
                     required=False,
                     help='Exception / assert dump state file')
+    AP.add_argument('--log-level', default="INFO", required=False,
+                    choices=[i_lvl for i_lvl in dir(logging) if i_lvl.isupper() and i_lvl.isalpha()],
+                    help='Execution log level (python logging)')
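+    # Note: the --log-level choices above are the upper-case level names exposed by the logging module
+    # (e.g. DEBUG, INFO, WARNING, ERROR, CRITICAL); the value is resolved via getattr(logging, ...) below.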
     AP.add_argument('--debugging', default=False, required=False, action='store_true',
                     help='(Optional) Enter custom development debugging mode.')
 
-    logging.basicConfig(level=logging.INFO,  # Set the logging level
-                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     ARGS = AP.parse_args()
     ARGS.logger = logging.getLogger("project-migrator")
     ARGS.explicit_server_names = lib.get_resource_names_ids(ARGS.explicit_server_names)
     ARGS.explicit_volume_names = lib.get_resource_names_ids(ARGS.explicit_volume_names)
+    logging.basicConfig(level=getattr(logging, ARGS.log_level),
+                        format='%(asctime)s %(name)s %(levelname)s %(message)s')
     if ARGS.debugging:
         import IPython
         #IPython.embed()