From 2c9e66005d82ee67edf0b7e88b5d236232d70d17 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Josef=20Smr=C4=8Dka?= <253466@mail.muni.cz>
Date: Wed, 24 Jul 2024 11:37:26 +0200
Subject: [PATCH] fix: python formatting and pylint issues

Re-format all Python code.

Partially fix pylint issues.
---
 ci/clib.py                            | 190 +++++++++----------
 ci/generate-data-for-communication.py |  80 ++++----
 ci/lib.py                             |  75 ++++----
 ci/olib.py                            | 253 ++++++++++++++------------
 ci/project-flavor-migration-check.py  |  10 +-
 ci/project-migrator.py                |  73 ++++----
 6 files changed, 347 insertions(+), 334 deletions(-)

diff --git a/ci/clib.py b/ci/clib.py
index 6b4c418..df9a643 100644
--- a/ci/clib.py
+++ b/ci/clib.py
@@ -5,12 +5,14 @@ import os.path
 
 from lib import remote_cmd_exec, log_or_assert
 
+
 def get_ceph_client_name(args, ceph_src_pool_name, ceph_dst_pool_name=None):
     """ identify which ceph user to use for planned ceph operation """
     int_pool_name = ceph_dst_pool_name if ceph_dst_pool_name else ceph_src_pool_name
 
     return "client.cinder" if int_pool_name in (args.source_ceph_cinder_pool_name, args.source_ceph_ephemeral_pool_name,) else "client.migrator"
 
+
 def ceph_rbd_images_list(args, pool_name):
     """ list ceph RBD images in pool named pool_name """
     script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-images-list.sh')
@@ -22,6 +24,7 @@ def ceph_rbd_images_list(args, pool_name):
     assert ecode == 0, f"RBD pool ({pool_name}) images received successfully (ecode)"
     return stdout.splitlines()
 
+
 def ceph_rbd_image_info(args, pool_name, rbd_image_name):
     """ get ceph RBD image information """
     ceph_client_name = get_ceph_client_name(args, pool_name)
@@ -33,7 +36,6 @@ def ceph_rbd_image_info(args, pool_name, rbd_image_name):
     return json.loads(stdout), stderr, ecode
 
 
-
 def ceph_rbd_image_exists(args, pool_name, rbd_image_name):
     """ detect whether RBD image {pool_name}/{rbd_image_name} exists """
     ceph_client_name = get_ceph_client_name(args, pool_name)
@@ -44,6 +46,7 @@ def ceph_rbd_image_exists(args, pool_name, rbd_image_name):
                                             f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
     return stdout.splitlines(), stderr, ecode
 
+
 def ceph_rbd_image_delete(args, pool_name, rbd_image_name):
     """ delete RBD image {pool_name}/{rbd_image_name} """
     ceph_client_name = get_ceph_client_name(args, pool_name)
@@ -54,6 +57,7 @@ def ceph_rbd_image_delete(args, pool_name, rbd_image_name):
                                             f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
     return stdout.splitlines(), stderr, ecode
 
+
 def ceph_rbd_image_flatten(args, pool_name, rbd_image_name):
     """ flatten RBD image {pool_name}/{rbd_image_name} """
     ceph_client_name = get_ceph_client_name(args, pool_name)
@@ -64,6 +68,7 @@ def ceph_rbd_image_flatten(args, pool_name, rbd_image_name):
                                             f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
     return stdout.splitlines(), stderr, ecode
 
+
 def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_snapshot_name,
                          dst_pool_name, dst_rbd_image_name):
     """ clone RBD image {src_pool_name}/{src_rbd_image_name}@{src_rbd_image_snapshot_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
@@ -76,6 +81,7 @@ def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_
                                             cmd)
     return stdout.splitlines(), stderr, ecode
 
+
 def ceph_rbd_image_copy(args, src_pool_name, src_rbd_image_name, dst_pool_name, dst_rbd_image_name):
     """ copy RBD image {src_pool_name}/{src_rbd_image_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
     ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
@@ -99,6 +105,7 @@ def ceph_rbd_image_snapshot_exists(args, pool_name, rbd_image_name, rbd_image_sn
                                             f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
     return stdout.splitlines(), stderr, ecode
 
+
 def ceph_rbd_image_snapshot_create(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
     """ create RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
     ceph_client_name = get_ceph_client_name(args, pool_name)
@@ -109,6 +116,7 @@ def ceph_rbd_image_snapshot_create(args, pool_name, rbd_image_name, rbd_image_sn
                                             f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
     return stdout.splitlines(), stderr, ecode
 
+
 def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
     """ delete RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
     ceph_client_name = get_ceph_client_name(args, pool_name)
@@ -119,172 +127,170 @@ def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_sn
                                             f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
     return stdout.splitlines(), stderr, ecode
 
+
 def migrate_rbd_image(args, server_block_device_mapping):
     """ migrate source (G1) ceph RBD image to destination (G2) ceph """
 
     ## G1: detect existing G1 RBD image
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
-    source_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
-                                                                    server_block_device_mapping['source']['ceph_pool_name'],
-                                                                    server_block_device_mapping['source']['ceph_rbd_image_name'])
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
+    source_server_rbd_images, _, ecode = ceph_rbd_image_exists(args,
+                                                               server_block_device_mapping['source']['ceph_pool_name'],
+                                                               server_block_device_mapping['source']['ceph_rbd_image_name'])
     log_or_assert(args, "G.01 Source OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
     log_or_assert(args, "G.01 Source OpenStack VM RBD image exists - single image returned",
                   source_server_rbd_images and len(source_server_rbd_images) == 1, locals())
     source_server_rbd_image = source_server_rbd_images[0]
 
-
     ## G2: find volume
-    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
-    destination_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
-                                                                         server_block_device_mapping['destination']['ceph_pool_name'],
-                                                                         server_block_device_mapping['destination']['volume_id'])
+    # CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
+    destination_server_rbd_images, _, ecode = ceph_rbd_image_exists(args,
+                                                                    server_block_device_mapping['destination']['ceph_pool_name'],
+                                                                    server_block_device_mapping['destination']['volume_id'])
     log_or_assert(args, "G.02 Destination OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
     log_or_assert(args, "G.02 Destination OpenStack VM RBD image exists - single image returned",
                   destination_server_rbd_images and len(destination_server_rbd_images) == 1, locals())
     destination_server_rbd_image = destination_server_rbd_images[0]
 
     ## G1: create RBD image protected snapshot
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
     source_rbd_image_snapshot_name = f"g1-g2-migration-{source_server_rbd_image}"
-    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
-                                                           server_block_device_mapping['source']['ceph_pool_name'],
-                                                           source_server_rbd_image,
-                                                           source_rbd_image_snapshot_name)
+    _, _, ecode = ceph_rbd_image_snapshot_exists(args,
+                                                 server_block_device_mapping['source']['ceph_pool_name'],
+                                                 source_server_rbd_image,
+                                                 source_rbd_image_snapshot_name)
     log_or_assert(args,
-                  "G.03 Source OpenStack VM RBD image has non-colliding snapshot " \
+                  "G.03 Source OpenStack VM RBD image has non-colliding snapshot "
                   f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
                   ecode != 0, locals())
 
-    stdout, stderr, ecode = ceph_rbd_image_snapshot_create(args,
-                                                           server_block_device_mapping['source']['ceph_pool_name'],
-                                                           source_server_rbd_image,
-                                                           source_rbd_image_snapshot_name)
+    _, _, ecode = ceph_rbd_image_snapshot_create(args,
+                                                 server_block_device_mapping['source']['ceph_pool_name'],
+                                                 source_server_rbd_image,
+                                                 source_rbd_image_snapshot_name)
     log_or_assert(args,
-                  "G.04 Source OpenStack VM RBD image snapshot created " \
+                  "G.04 Source OpenStack VM RBD image snapshot created "
                   f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
                   ecode == 0, locals())
 
-
-    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
-                                                           server_block_device_mapping['source']['ceph_pool_name'],
-                                                           source_server_rbd_image,
-                                                           source_rbd_image_snapshot_name)
+    _, _, ecode = ceph_rbd_image_snapshot_exists(args,
+                                                 server_block_device_mapping['source']['ceph_pool_name'],
+                                                 source_server_rbd_image,
+                                                 source_rbd_image_snapshot_name)
     log_or_assert(args,
-                  "G.05 Source OpenStack VM RBD image snapshot exists " \
+                  "G.05 Source OpenStack VM RBD image snapshot exists "
                   f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
                   ecode == 0, locals())
 
     ## G2: delete RBD image
-    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
+    # CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
     ## G2: confirm volume is deleted
-    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
-    stdout, stderr, ecode = ceph_rbd_image_delete(args,
-                                                  server_block_device_mapping['destination']['ceph_pool_name'],
-                                                  destination_server_rbd_image)
+    # CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
+    _, _, ecode = ceph_rbd_image_delete(args,
+                                        server_block_device_mapping['destination']['ceph_pool_name'],
+                                        destination_server_rbd_image)
     log_or_assert(args,
                   f"G.06 Destination OpenStack VM RBD image deletion succeeded ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
                   ecode == 0, locals())
-    stdout, stderr, ecode = ceph_rbd_image_exists(args,
-                                                  server_block_device_mapping['destination']['ceph_pool_name'],
-                                                  destination_server_rbd_image)
+    _, _, ecode = ceph_rbd_image_exists(args,
+                                        server_block_device_mapping['destination']['ceph_pool_name'],
+                                        destination_server_rbd_image)
     log_or_assert(args,
                   f"G.07 Destination OpenStack VM RBD image does not exist ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
                   ecode != 0, locals())
 
-
     ## G1: clone from snapshot
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
     source_rbd_cloned_image_name = f"g1-g2-migration-{source_server_rbd_image}"
-    stdout, stderr, ecode = ceph_rbd_image_clone(args,
-                                                 server_block_device_mapping['source']['ceph_pool_name'],
-                                                 source_server_rbd_image,
-                                                 source_rbd_image_snapshot_name,
-                                                 server_block_device_mapping['source']['ceph_pool_name'],
-                                                 source_rbd_cloned_image_name)
+    _, _, ecode = ceph_rbd_image_clone(args,
+                                       server_block_device_mapping['source']['ceph_pool_name'],
+                                       source_server_rbd_image,
+                                       source_rbd_image_snapshot_name,
+                                       server_block_device_mapping['source']['ceph_pool_name'],
+                                       source_rbd_cloned_image_name)
     log_or_assert(args,
-                  "G.08 Source OpenStack VM RBD image cloned succesfully " \
-                  f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name} -> " \
+                  "G.08 Source OpenStack VM RBD image cloned successfully "
+                  f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name} -> "
                   f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                   ecode == 0, locals())
-    stdout, stderr, ecode = ceph_rbd_image_exists(args,
-                                                  server_block_device_mapping['source']['ceph_pool_name'],
-                                                  source_rbd_cloned_image_name)
+    _, _, ecode = ceph_rbd_image_exists(args,
+                                        server_block_device_mapping['source']['ceph_pool_name'],
+                                        source_rbd_cloned_image_name)
     log_or_assert(args,
                   f"G.09 Source OpenStack VM cloned RBD image exists ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                   ecode == 0, locals())
 
     ## G1: flatten cloned RBD image
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
-    stdout, stderr, ecode = ceph_rbd_image_flatten(args,
-                                                   server_block_device_mapping['source']['ceph_pool_name'],
-                                                   source_rbd_cloned_image_name)
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
+    _, _, ecode = ceph_rbd_image_flatten(args,
+                                         server_block_device_mapping['source']['ceph_pool_name'],
+                                         source_rbd_cloned_image_name)
     log_or_assert(args,
                   f"G.10 Source OpenStack VM cloned RBD image flatten successfully ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                   ecode == 0, locals())
 
     ## G1->G2: copy RBD image to target pool
-    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
-    #CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
-    stdout, stderr, ecode = ceph_rbd_image_copy(args,
-                                                server_block_device_mapping['source']['ceph_pool_name'],
-                                                source_rbd_cloned_image_name,
-                                                server_block_device_mapping['destination']['ceph_pool_name'],
-                                                destination_server_rbd_image)
+    # CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
+    # CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
+    _, _, ecode = ceph_rbd_image_copy(args,
+                                      server_block_device_mapping['source']['ceph_pool_name'],
+                                      source_rbd_cloned_image_name,
+                                      server_block_device_mapping['destination']['ceph_pool_name'],
+                                      destination_server_rbd_image)
     log_or_assert(args,
-                  "G.11 Source OpenStack VM RBD image copied G1 -> G2 succesfully" \
-                  f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name} -> " \
+                  "G.11 Source OpenStack VM RBD image copied G1 -> G2 successfully "
+                  f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name} -> "
                   f"{server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image}",
                   ecode == 0, locals())
-    stdout, stderr, ecode = ceph_rbd_image_exists(args,
-                                                  server_block_device_mapping['destination']['ceph_pool_name'],
-                                                  destination_server_rbd_image)
+    _, _, ecode = ceph_rbd_image_exists(args,
+                                        server_block_device_mapping['destination']['ceph_pool_name'],
+                                        destination_server_rbd_image)
     log_or_assert(args,
                   f"G.12 Destination OpenStack VM RBD image exists ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
                   ecode == 0, locals())
 
     ## G1: delete cloned RBD image
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
-    stdout, stderr, ecode = ceph_rbd_image_delete(args,
-                                                  server_block_device_mapping['source']['ceph_pool_name'],
-                                                  source_rbd_cloned_image_name)
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
+    _, _, ecode = ceph_rbd_image_delete(args,
+                                        server_block_device_mapping['source']['ceph_pool_name'],
+                                        source_rbd_cloned_image_name)
     log_or_assert(args,
                   f"G.13 Source OpenStack VM RBD cloned image deletion succeeded ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                   ecode == 0, locals())
-    stdout, stderr, ecode = ceph_rbd_image_exists(args,
-                                                  server_block_device_mapping['source']['ceph_pool_name'],
-                                                  source_rbd_cloned_image_name)
+    _, _, ecode = ceph_rbd_image_exists(args,
+                                        server_block_device_mapping['source']['ceph_pool_name'],
+                                        source_rbd_cloned_image_name)
     log_or_assert(args,
                   f"G.14 Source OpenStack VM cloned RBD image does not exist anymore ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
                   ecode != 0, locals())
 
     ## G1: remove created snapshot
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
-    #CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
-    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
-                                                           server_block_device_mapping['source']['ceph_pool_name'],
-                                                           source_server_rbd_image,
-                                                           source_rbd_image_snapshot_name)
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
+    # CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
+    _, _, ecode = ceph_rbd_image_snapshot_exists(args,
+                                                 server_block_device_mapping['source']['ceph_pool_name'],
+                                                 source_server_rbd_image,
+                                                 source_rbd_image_snapshot_name)
     log_or_assert(args,
-                  "G.15 Source OpenStack VM RBD image snapshot still exists " \
+                  "G.15 Source OpenStack VM RBD image snapshot still exists "
                   f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
                   ecode == 0, locals())
-    stdout, stderr, ecode = ceph_rbd_image_snapshot_delete(args,
-                                                           server_block_device_mapping['source']['ceph_pool_name'],
-                                                           source_server_rbd_image,
-                                                           source_rbd_image_snapshot_name)
+    _, _, ecode = ceph_rbd_image_snapshot_delete(args,
+                                                 server_block_device_mapping['source']['ceph_pool_name'],
+                                                 source_server_rbd_image,
+                                                 source_rbd_image_snapshot_name)
     log_or_assert(args,
-                  "G.16 Source OpenStack VM RBD image snapshot deletion succeeeded " \
+                  "G.16 Source OpenStack VM RBD image snapshot deletion succeeded "
                   f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
                   ecode == 0, locals())
-    stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
-                                                           server_block_device_mapping['source']['ceph_pool_name'],
-                                                           source_server_rbd_image,
-                                                           source_rbd_image_snapshot_name)
+    _, _, ecode = ceph_rbd_image_snapshot_exists(args,
+                                                 server_block_device_mapping['source']['ceph_pool_name'],
+                                                 source_server_rbd_image,
+                                                 source_rbd_image_snapshot_name)
     log_or_assert(args,
-                  "G.17 Source OpenStack VM RBD image snapshot does not exist anymore " \
+                  "G.17 Source OpenStack VM RBD image snapshot does not exist anymore "
                   f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
                   ecode != 0, locals())
diff --git a/ci/generate-data-for-communication.py b/ci/generate-data-for-communication.py
index bf22188..3c4d59e 100755
--- a/ci/generate-data-for-communication.py
+++ b/ci/generate-data-for-communication.py
@@ -3,7 +3,7 @@
 OpenStack project data extractor.
 
 Tool which provides OpenStack project data, which is used to generate email to user. 
-It's used in ostack-einfra_cz-trigger-communication-generation pipeline job to generate csv files, which are send to pipiline in kb generate-communication.
+It's used in ostack-einfra_cz-trigger-communication-generation pipeline job to generate csv files, which are sent to pipeline in kb generate-communication.
 
 
 Tool relies on main libraries:
@@ -20,14 +20,13 @@ Usage example:
 """
 
 import argparse
+import csv
 import logging
 import sys
-import csv
-import yaml
+from datetime import date
 
 import lib
-import olib
-from datetime import date
+
 
 def main(args):
     """ main project migration loop """
@@ -57,77 +56,81 @@ def main(args):
     destination_project_conn = lib.get_ostack_connection(destination_migrator_openrc | {'OS_PROJECT_NAME': destination_project.name})
 
     # get source/destination entities in the project
-    source_project_servers = lib.get_ostack_project_servers(source_project_conn, source_project)
+    source_project_servers = lib.get_ostack_project_servers(source_project_conn)
     args.logger.info("C.01 Source OpenStack cloud servers received")
     lib.assert_entity_ownership(source_project_servers, source_project)
-    
-    destination_project_servers = lib.get_ostack_project_servers(destination_project_conn, destination_project)
+
+    destination_project_servers = lib.get_ostack_project_servers(destination_project_conn)
     args.logger.info("C.02 Destination OpenStack cloud servers received")
     lib.assert_entity_ownership(destination_project_servers, destination_project)
 
     # prepare project data
-    today = date.today()    
+    today = date.today()
     vm_count = len(destination_project_servers)
     signature = args.signature
     project_expiration = args.expiration
-    
+
     project_data = [
         {
-        "project_name": source_project_name,
-        "migration_date": today,
-        "project_expiration": project_expiration,
-        "vm_count": vm_count,
-        "signature": signature,
-        "servers": "servers.csv"
+            "project_name": source_project_name,
+            "migration_date": today,
+            "project_expiration": project_expiration,
+            "vm_count": vm_count,
+            "signature": signature,
+            "servers": "servers.csv"
         }
     ]
-    args.logger.info("D.01 Basic information about migrated project gathered")            
-    
+    args.logger.info("D.01 Basic information about migrated project gathered")
+
     # prepare server data
     servers_data = []
     for server in destination_project_servers:
         server_info = {
-           "g1_name" : "",
-           "g1_id"   : "",
-           "g1_fip"  : "",
-           "g2_name" : server.name,
-           "g2_id"   : server.id,
-           "g2_fip"  : get_fip(server.addresses.items())
+            "g1_name": "",
+            "g1_id": "",
+            "g1_fip": "",
+            "g2_name": server.name,
+            "g2_id": server.id,
+            "g2_fip": get_fip(server)
         }
 
         for source_server in source_project_servers:
-            dest_server_name = server.name.replace('migrated-','')
+            dest_server_name = server.name.replace('migrated-', '')
             if source_server.name == dest_server_name:
                 server_info['g1_name'] = source_server.name
-                server_info['g1_id']   = source_server.id
-                server_info['g1_fip']  = get_fip(source_server.addresses.items())
-                break    
+                server_info['g1_id'] = source_server.id
+                server_info['g1_fip'] = get_fip(source_server)
+                break
         servers_data.append(server_info)
     args.logger.info("D.02 Information about migrated servers gathered")
 
-    #generate csv files which is lately send to kb job
+    # generate csv files which are later sent to the kb job
     data_fieldnames = ["project_name", "migration_date", "project_expiration", "vm_count", "signature", "servers"]
     write_csv("data.csv", data_fieldnames, project_data)
     args.logger.info("E.01 file 'data.csv' containing project data created.")
-    
+
     servers_fieldnames = ["g1_name", "g1_id", "g1_fip", "g2_name", "g2_id", "g2_fip"]
     write_csv("servers.csv", servers_fieldnames, servers_data)
     args.logger.info("E.02 file 'servers.csv' containing migrated servers data created.")
 
+
 # Function to write data to a CSV file
 def write_csv(file_name, fieldnames, data):
-    with open(file_name, mode='w', newline='') as file:
+    """ Output CVS data into a file """
+    with open(file_name, mode='w', newline='', encoding='utf_8') as file:
         writer = csv.DictWriter(file, fieldnames=fieldnames, delimiter=';')
         writer.writeheader()
         writer.writerows(data)
 
-def get_fip(server_addresses_items):
-        for network_name,ip_info_list in server_addresses_items:
-            for ip_info in ip_info_list:
-                addr = ip_info.get('addr')
-                ip_type = ip_info.get('OS-EXT-IPS:type')
-                if ip_type == 'floating':
-                    return addr
+
+def get_fip(server):
+    """ Return a floating IP of a server """
+    for ip_info_list in server.addresses.values():
+        for ip_info in ip_info_list:
+            if ip_info.get('OS-EXT-IPS:type') == 'floating':
+                return ip_info.get('addr')
+    return None
+
 
 # main() call (argument parsing)
 # ---------------------------------------------------------------------------
@@ -158,4 +161,3 @@ if __name__ == "__main__":
                         format='%(asctime)s %(name)s %(levelname)s %(message)s')
 
     sys.exit(main(ARGS))
-    
\ No newline at end of file
diff --git a/ci/lib.py b/ci/lib.py
index 83bcca6..199686a 100644
--- a/ci/lib.py
+++ b/ci/lib.py
@@ -1,28 +1,31 @@
 """ OpenStack project migrator library """
 
-import re
-import pprint
-import time
 import os
 import os.path
+import pprint
+import re
+import time
 
-import paramiko
 import openstack
-from keystoneauth1.identity import v3
+import paramiko
 from keystoneauth1 import session
+from keystoneauth1.identity import v3
 
 BOOLEAN_CHOICES = ["True", "true", "False", "false"]
 
-def wait_for_keypress(msg="Press Enter to continue..."):
+
+def wait_for_keypress():
     """ wait for enter keypress """
     return input("Press Enter to continue...")
 
+
 def get_resource_names_ids(resources):
     """ parses list of resource names/IDs separated by space of comma returned as list of strings or None """
     if isinstance(resources, str) and resources:
-        return resources.replace(","," ").split()
+        return resources.replace(",", " ").split()
     return None
 
+
 def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
     """ transform input dictionary and filter its keys with allowed_keys and denied_keys sequences """
     int_allowed_keys = allowed_keys if allowed_keys else tuple()
@@ -33,10 +36,12 @@ def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
         return {i_key: dict_data[i_key] for i_key in dict_data if i_key not in int_denied_keys}
     return dict_data
 
+
 def executed_as_admin_user_in_ci():
     """ identity the script user within CI pipeline """
     return os.environ.get('GITLAB_USER_LOGIN') in ('246254', '252651', 'Jan.Krystof', 'moravcova', '469240', 'Josef.Nemec', '247801', '253466', '252985')
 
+
 def executed_in_ci():
     """ detect CI environment """
     envvar_names = ('CI_JOB_NAME', 'CI_REPOSITORY_URL', 'GITLAB_USER_LOGIN')
@@ -49,6 +54,7 @@ def get_ostack_project_names(project_name):
         return project_name.split('->', 1)
     return project_name, project_name
 
+
 def get_destination_subnet(source_subnet):
     """ LUT for networks """
     subnet_mapping = {
@@ -56,11 +62,12 @@ def get_destination_subnet(source_subnet):
 
         # group project internal network
         "group-project-network-subnet": "group-project-network-subnet"
-        }
-    if source_subnet in subnet_mapping.keys():
+    }
+    if source_subnet in subnet_mapping:
         return subnet_mapping[source_subnet]
     return None
 
+
 def get_destination_router(source_router):
     """ LUT for networks """
     router_mapping = {
@@ -68,8 +75,8 @@ def get_destination_router(source_router):
 
         # group project internal network
         "router": "group-project-router"
-        }
-    if source_router in router_mapping.keys():
+    }
+    if source_router in router_mapping:
         return router_mapping[source_router]
     return None
 
@@ -79,10 +86,11 @@ def normalize_table_data_field(data_field):
     int_dict = {}
     i_name_key = '@name'
     for i_data_field_item in data_field:
-        i_value_key = [ i_k for i_k in i_data_field_item.keys() if i_k != i_name_key][0]
+        i_value_key = [i_k for i_k in i_data_field_item.keys() if i_k != i_name_key][0]
         int_dict[i_data_field_item[i_name_key]] = i_data_field_item[i_value_key]
     return int_dict
 
+
 def normalize_table_data(data):
     """ normalize whole table data """
     int_list = []
@@ -90,20 +98,24 @@ def normalize_table_data(data):
         int_list.append(normalize_table_data_field(i_data_field['field']))
     return int_list
 
+
 def get_dst_secgroup_name(args, name=""):
     """ translate original secgroup name to destination one """
     return f"{args.destination_secgroup_name_prefix}{name}"
 
+
 def get_dst_resource_name(args, name=""):
     """ translate original name to destination one """
     return f"{args.destination_entity_name_prefix}{name}"
 
+
 def get_dst_resource_desc(args, desc="", fields=None):
     """ translate original description to destination one and fill in optional fields """
     if '{}' in args.destination_entity_description_suffix and fields:
         return f"{desc}{args.destination_entity_description_suffix.format(fields)}"
     return f"{desc}{args.destination_entity_description_suffix}"
 
+
 def get_openrc(file_handle):
     """ parse and return OpenRC file """
     openrc_vars = {}
@@ -116,7 +128,7 @@ def get_openrc(file_handle):
 
 
 def get_ostack_connection(openrc_vars):
-    """ """
+    """ create a connection to a cloud and return the Connection object """
     auth_args = {
         'auth_url': openrc_vars.get('OS_AUTH_URL'),
         'username': openrc_vars.get('OS_USERNAME'),
@@ -137,45 +149,27 @@ def get_ostack_connection(openrc_vars):
     ostack_conn = openstack.connection.Connection(session=ostack_sess, **connection_args)
     return ostack_conn
 
+
 def get_ostack_project(ostack_connection, project_name):
+    """ return a project by name """
     project = None
     for i_project in ostack_connection.list_projects():
         if i_project.name == project_name:
             project = i_project
     return project
 
+
 def get_ostack_project_type(ostack_connection, project):
     """ detect project type, return 'group' / 'personal' / 'other' """
-    if project.name in [ i_user.name for i_user in ostack_connection.list_users() ]:
+    if project.name in [i_user.name for i_user in ostack_connection.list_users()]:
         return "personal"
     return "group"
 
-def get_ostack_project_security_groups(ostack_connection, project=None):
-    security_groups = []
-    if project:
-        for i_security_group in ostack_connection.network.security_groups():
-            if i_security_group.tenant_id == project.id:
-                security_groups.append(i_security_group)
-        return security_groups
-    return tuple(ostack_connection.network.security_groups())
-
-def get_ostack_project_keypairs(ostack_connection, project=None):
-    return ostack_connection.list_keypairs()
-def get_ostack_project_keypairs2(ostack_connection, project=None):
-    return list(ostack_connection.compute.keypairs())
 
-
-def get_ostack_project_servers(ostack_connection, project=None):
+def get_ostack_project_servers(ostack_connection):
+    """ return list of project servers """
     return tuple(ostack_connection.compute.servers())
 
-def get_ostack_project_volumes(ostack_connection, project=None):
-    return ostack_connection.block_store.volumes()
-
-def get_resource_details(resources):
-    """ inspect resources """
-    for i_resource in resources:
-        print(i_resource)
-        pprint.pprint(i_resource)
 
 def remote_cmd_exec(hostname, username, key_filename, command):
     """ executes remote command, returs stdout, stderr and exit-code or Exception """
@@ -183,7 +177,6 @@ def remote_cmd_exec(hostname, username, key_filename, command):
     ssh_client = paramiko.SSHClient()
     # Automatically add untrusted hosts
     ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-    ecode = None
 
     try:
         # Connect to the remote host
@@ -191,7 +184,7 @@ def remote_cmd_exec(hostname, username, key_filename, command):
         ssh_client.connect(hostname, username=username, pkey=pkey, look_for_keys=False)
 
         # Execute the command, read the output and close
-        stdin, stdout, stderr = ssh_client.exec_command(command)
+        _, stdout, stderr = ssh_client.exec_command(command)
         output = stdout.read().decode().strip()
         error = stderr.read().decode().strip()
         ecode = stdout.channel.recv_exit_status()
@@ -210,8 +203,6 @@ def assert_entity_ownership(entities, project):
         assert i_entity.project_id == project.id, f"Entity belongs to expected project (id: {project.id})"
 
 
-
-
 def log_or_assert(args, msg, condition, trace_details=None, msg_guidance=''):
     """ log, assert, dump state """
     if not condition:
@@ -236,6 +227,7 @@ def wait_for_ostack_server_status(ostack_connection, server_name_or_id, server_s
 
     return int_server_status
 
+
 def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=300):
     """ wait for volume getting expected state """
     int_start_timestamp = time.time()
@@ -249,5 +241,4 @@ def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_s
             break
         time.sleep(10)
 
-
     return int_volume_status
diff --git a/ci/olib.py b/ci/olib.py
index e7040c0..bc967e3 100644
--- a/ci/olib.py
+++ b/ci/olib.py
@@ -6,20 +6,21 @@ import ipaddress
 import math
 import os.path
 
-import xmltodict
 import openstack
 import openstack.exceptions
+import xmltodict
 
 import clib
 from lib import log_or_assert, get_dst_resource_name, get_dst_secgroup_name, get_dst_resource_desc, remote_cmd_exec, normalize_table_data, trim_dict, wait_for_ostack_volume_status
 
+
 def get_destination_network(source_network):
     """ LUT for networks """
     network_mapping = {
         # shared
-        "78-128-250-pers-proj-net" :  "internal-ipv4-general-private",
-        "147-251-115-pers-proj-net" : "internal-ipv4-general-private",
-        "public-muni-v6-432" :        "external-ipv6-general-public",
+        "78-128-250-pers-proj-net": "internal-ipv4-general-private",
+        "147-251-115-pers-proj-net": "internal-ipv4-general-private",
+        "public-muni-v6-432": "external-ipv6-general-public",
         # external
         "public-muni-147-251-21-GROUP": "external-ipv4-general-public",
         "public-cesnet-78-128-250-PERSONAL": "external-ipv4-general-public",
@@ -36,69 +37,70 @@ def get_destination_network(source_network):
         # group project internal network
         "group-project-network": "group-project-network",
         "bjbjbgroup-project-network": "group-project-network"
-        }
+    }
     if source_network in network_mapping:
         return network_mapping[source_network]
     return None
 
+
 def get_destination_flavor(source_flavor):
     """ LUT for flavors """
     flavor_mapping = {
-        #'eph.16cores-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
-        #'eph.8cores-30ram': 'c2.8core-30ram' # nemusime resit neni pouzit u zadneho projektu v g1
-        #'eph.8cores-60ram': 'c3.8core-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
-        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram', # nesedi velikost disku v G2 je 80 misto 35
-        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram', # ok
-        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram', # nesedi velikost disku v G2 je 80 misto 35
-        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram', # ok
-        #'hdn.medium-ssd-ephem': # nemusime resit neni pouzit u zadneho projektu v g1
-        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500', # neni v G2 a je potreba
-        'hpc.16core-128ram': 'c3.16core-128ram', # neni v G2 a je potreba
-        'hpc.16core-256ram': 'c3.16core-256ram', # neni v G2 a je potreba
-        'hpc.16core-32ram': 'c2.16core-30ram', # ok
-        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk', # neni v G2 a je potreba
-        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd', # neni v G2 a je potreba
-        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram', # ok
-        'hpc.18core-48ram': 'c2.18core-45ram', # ok
-        'hpc.18core-64ram-dukan': 'c2.24core-60ram', # nemusime resit
-        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd', # nemusime resit
-        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500', # neni v G2 a je potreba
-        'hpc.30core-256ram': 'c3.30core-240ram', # ok
-        'hpc.30core-64ram': 'c3.32core-60ram', # v G2 je o 2 CPU vic
-        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram', # ok
-        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', #  ok
-        'hpc.4core-4ram': 'e1.medium', # nemusime resit
-        'hpc.8core-128ram': 'c3.8core-120ram', # OK
-        'hpc.8core-16ram': 'c2.8core-16ram', # ok
-        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram', # nemusime resit
-        'hpc.8core-256ram': None, # nemusime resit
-        'hpc.8core-32ram-dukan': 'c2.8core-30ram', # nemusime resit
-        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram', # ok
-        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram', # ok
-        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram', # ok
-        'hpc.8core-8ram': 'e1.1xlarge', # v G2 je o 20 GB mensi disk
-        'hpc.hdh-ephem': 'hpc.hdh', # neni a je potreba
-        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500', # neni potreba
-        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # neni potreba
-        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # neni potreba
-        'hpc.large': 'g2.3xlarge', # ok
-        'hpc.medium': 'c2.8core-30ram', # ok
-        'hpc.small': 'c2.4core-16ram', # ok
-        'hpc.xlarge': None, # neni v G2
-        'hpc.xlarge-memory': 'c3.xlarge-memory', # neni v G2
-        'standard.16core-32ram': 'g2.2xlarge', # ok
-        'standard.20core-128ram': 'e1.20core-128ram', # neni potreba
-        'standard.20core-256ram': 'e1.20core-256ram', # neni v G2
-        'standard.2core-16ram': 'c3.2core-16ram', # ok
-        'standard.large': 'e1.large', # ok pripadne jeste c3.4core-8ram
-        'standard.medium': 'e1.medium', # o 2 vice CPU
-        'standard.memory': 'c3.2core-30ram', # pripadne i c2.2core-30ram
-        'standard.one-to-many': 'c3.24core-60ram', # v G2 je o 4 vice CPU
-        'standard.small': 'e1.small', # 2x vice ram a CPU u G2
-        'standard.tiny': 'e1.tiny', # 2x vice ram a CPU u G2
-        'standard.xlarge': 'e1.2xlarge', # o 4 vice CPU G2
-        'standard.xlarge-cpu': 'e1.2xlarge', # ok
-        'standard.xxlarge': 'c2.8core-30ram', # ok
+        # 'eph.16cores-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
+        # 'eph.8cores-30ram': 'c2.8core-30ram' # nemusime resit neni pouzit u zadneho projektu v g1
+        # 'eph.8cores-60ram': 'c3.8core-60ram' # nemusime resit neni pouzit u zadneho projektu v g1
+        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram',  # nesedi velikost disku v G2 je 80 misto 35
+        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram',  # ok
+        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram',  # nesedi velikost disku v G2 je 80 misto 35
+        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram',  # ok
+        # 'hdn.medium-ssd-ephem': # nemusime resit neni pouzit u zadneho projektu v g1
+        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500',  # neni v G2 a je potreba
+        'hpc.16core-128ram': 'c3.16core-128ram',  # neni v G2 a je potreba
+        'hpc.16core-256ram': 'c3.16core-256ram',  # neni v G2 a je potreba
+        'hpc.16core-32ram': 'c2.16core-30ram',  # ok
+        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk',  # neni v G2 a je potreba
+        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd',  # neni v G2 a je potreba
+        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram',  # ok
+        'hpc.18core-48ram': 'c2.18core-45ram',  # ok
+        'hpc.18core-64ram-dukan': 'c2.24core-60ram',  # nemusime resit
+        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd',  # nemusime resit
+        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500',  # neni v G2 a je potreba
+        'hpc.30core-256ram': 'c3.30core-240ram',  # ok
+        'hpc.30core-64ram': 'c3.32core-60ram',  # v G2 je o 2 CPU vic
+        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram',  # ok
+        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram',  # ok
+        'hpc.4core-4ram': 'e1.medium',  # nemusime resit
+        'hpc.8core-128ram': 'c3.8core-120ram',  # OK
+        'hpc.8core-16ram': 'c2.8core-16ram',  # ok
+        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram',  # nemusime resit
+        'hpc.8core-256ram': None,  # nemusime resit
+        'hpc.8core-32ram-dukan': 'c2.8core-30ram',  # nemusime resit
+        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram',  # ok
+        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram',  # ok
+        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram',  # ok
+        'hpc.8core-8ram': 'e1.1xlarge',  # v G2 je o 20 GB mensi disk
+        'hpc.hdh-ephem': 'hpc.hdh',  # neni a je potreba
+        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500',  # neni potreba
+        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram',  # neni potreba
+        # 'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # neni potreba
+        'hpc.large': 'g2.3xlarge',  # ok
+        'hpc.medium': 'c2.8core-30ram',  # ok
+        'hpc.small': 'c2.4core-16ram',  # ok
+        'hpc.xlarge': None,  # neni v G2
+        'hpc.xlarge-memory': 'c3.xlarge-memory',  # neni v G2
+        'standard.16core-32ram': 'g2.2xlarge',  # ok
+        'standard.20core-128ram': 'e1.20core-128ram',  # neni potreba
+        'standard.20core-256ram': 'e1.20core-256ram',  # neni v G2
+        'standard.2core-16ram': 'c3.2core-16ram',  # ok
+        'standard.large': 'e1.large',  # ok pripadne jeste c3.4core-8ram
+        'standard.medium': 'e1.medium',  # o 2 vice CPU
+        'standard.memory': 'c3.2core-30ram',  # pripadne i c2.2core-30ram
+        'standard.one-to-many': 'c3.24core-60ram',  # v G2 je o 4 vice CPU
+        'standard.small': 'e1.small',  # 2x vice ram a CPU u G2
+        'standard.tiny': 'e1.tiny',  # 2x vice ram a CPU u G2
+        'standard.xlarge': 'e1.2xlarge',  # o 4 vice CPU G2
+        'standard.xlarge-cpu': 'e1.2xlarge',  # ok
+        'standard.xxlarge': 'c2.8core-30ram',  # ok
         'standard.xxxlarge': 'c3.8core-60ram'  # ok
     }
     assert source_flavor in flavor_mapping, "Source flavor can be mapped to destination one"
@@ -111,11 +113,12 @@ def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_pr
     # read source network details
     src_network = src_ostack_conn.network.find_network(src_network_name, project_id=src_project.id)
     # read matching subnets details
-    src_subnets = [ src_ostack_conn.network.find_subnet(i_src_subnet_id) for i_src_subnet_id in src_network.subnet_ids ]
+    src_subnets = [src_ostack_conn.network.find_subnet(i_src_subnet_id) for i_src_subnet_id in src_network.subnet_ids]
     # read linked routers
-    src_network_router_ports = [ i_src_router_port for i_src_router_port in src_ostack_conn.list_ports(filters={'network_id': src_network.id}) if i_src_router_port.device_owner == 'network:router_interface' ]
-    src_network_routers_subnets = [ (src_ostack_conn.network.find_router(router_port.device_id), [rp_fixed_ip['subnet_id'] for rp_fixed_ip in router_port.fixed_ips if 'subnet_id' in rp_fixed_ip])  for router_port in src_network_router_ports ]
-
+    src_network_router_ports = [i_src_router_port for i_src_router_port in src_ostack_conn.list_ports(filters={'network_id': src_network.id}) if
+                                i_src_router_port.device_owner == 'network:router_interface']
+    src_network_routers_subnets = [(src_ostack_conn.network.find_router(router_port.device_id), [rp_fixed_ip['subnet_id'] for rp_fixed_ip in router_port.fixed_ips if 'subnet_id' in rp_fixed_ip]) for
+                                   router_port in src_network_router_ports]
 
     # read external network
     dst_ext_network = dst_ostack_conn.network.find_network(args.destination_ipv4_external_network)
@@ -179,10 +182,11 @@ def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_pr
 
     return dst_network, dst_subnets, dst_network_routers
 
+
 def get_or_create_dst_server_networking_v1(args,
-                                        source_project_conn, destination_project_conn,
-                                        source_project, destination_project,
-                                        source_server):
+                                           source_project_conn, destination_project_conn,
+                                           source_project, destination_project,
+                                           source_server):
     """ assure created server networking (get or create) """
     server_network_addresses = []
     for i_source_network_name, i_source_network_addresses in source_server.addresses.items():
@@ -203,6 +207,7 @@ def get_or_create_dst_server_networking_v1(args,
                                                                    'addresses': i_source_network_addresses}})
     return server_network_addresses
 
+
 def get_or_create_dst_server_networking_v2(args,
                                            source_project_conn, destination_project_conn,
                                            source_project, destination_project,
@@ -235,6 +240,7 @@ def get_or_create_dst_server_networking_v2(args,
 
     return server_network_addresses
 
+
 def get_or_create_dst_server_networking(args,
                                         source_project_conn, destination_project_conn,
                                         source_project, destination_project,
@@ -242,10 +248,10 @@ def get_or_create_dst_server_networking(args,
     """ assure created server networking (get or create) """
     server_network_addresses = []
     for i_src_server_port_network_name, i_src_server_port_network_addresses in source_server.addresses.items():
-        i_src_server_ip_addr = [(i_item['addr']) for i_item in i_src_server_port_network_addresses \
-                                                     if i_item['OS-EXT-IPS:type'] == 'fixed'][0]
-        i_src_server_mac_addr = [(i_item['OS-EXT-IPS-MAC:mac_addr']) for i_item in i_src_server_port_network_addresses \
-                                                                         if i_item['OS-EXT-IPS:type'] == 'fixed'][0]
+        i_src_server_ip_addr = [(i_item['addr']) for i_item in i_src_server_port_network_addresses
+                                if i_item['OS-EXT-IPS:type'] == 'fixed'][0]
+        i_src_server_mac_addr = [(i_item['OS-EXT-IPS-MAC:mac_addr']) for i_item in i_src_server_port_network_addresses
+                                 if i_item['OS-EXT-IPS:type'] == 'fixed'][0]
         i_src_server_ports = find_ostack_port(source_project_conn, i_src_server_mac_addr, i_src_server_ip_addr, device=source_server)
         log_or_assert(args, f"F.3 Source server ostack fixed IP address detected ({i_src_server_ip_addr})", i_src_server_ip_addr)
         log_or_assert(args, f"F.3 Source server ostack fixed MAC address detected ({i_src_server_mac_addr})", i_src_server_mac_addr)
@@ -340,6 +346,7 @@ def download_source_keypairs(args):
 
 
 def filter_keypairs(keypairs, filter_filed_name, filter_field_value):
+    """ filter keypairs list of dicts by value of a field """
     return [i_keypair for i_keypair in keypairs if i_keypair.get(filter_filed_name, "") == filter_field_value]
 
 
@@ -423,8 +430,8 @@ def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_
                     i_mod_rule['remote_group_id'] = _dst_sg.id
                 else:
                     int_linked_sg = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
-                                                            _src_sg, dst_project,
-                                                            copy.deepcopy(int_recursion_stack))
+                                                           _src_sg, dst_project,
+                                                           copy.deepcopy(int_recursion_stack))
                     i_mod_rule['remote_group_id'] = int_linked_sg.id
         try:
             dst_ostack_conn.network.create_security_group_rule(**i_mod_rule)
@@ -433,6 +440,7 @@ def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_
 
     return int_sg
 
+
 def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project):
     """ duplicate all projects's openstack security group[s] """
 
@@ -442,7 +450,7 @@ def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_c
         j_dst_security_group_found = False
         for j_dst_security_group in tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id)):
             if get_dst_secgroup_name(args, i_src_security_group.name) == j_dst_security_group.name and \
-               i_src_security_group.id in j_dst_security_group.description:
+                    i_src_security_group.id in j_dst_security_group.description:
                 j_dst_security_group_found = True
         if not j_dst_security_group_found:
             create_security_groups(args, src_ostack_conn, dst_ostack_conn, i_src_security_group, dst_project)
@@ -452,38 +460,38 @@ def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_c
 
 def get_or_create_dst_server_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project, src_server):
     """ assure equivalent security groups are created in destination cloud """
-    dst_server_security_groups=[]
+    dst_server_security_groups = []
     if src_server.security_groups:
         for i_src_server_security_group_name in {i_sg['name'] for i_sg in src_server.security_groups}:
             i_src_server_security_group = src_ostack_conn.network.find_security_group(i_src_server_security_group_name,
-                                                                                    project_id=src_project.id)
-            i_dst_server_security_group = None
+                                                                                      project_id=src_project.id)
             if i_dst_server_security_group := dst_ostack_conn.network.find_security_group(get_dst_secgroup_name(args,
                                                                                                                 i_src_server_security_group.name),
-                                                                                                                project_id=dst_project.id):
+                                                                                          project_id=dst_project.id):
                 log_or_assert(args,
-                            f"F.10 Destination OpenStack server security group found already ({i_dst_server_security_group.name})",
-                            i_dst_server_security_group)
+                              f"F.10 Destination OpenStack server security group found already ({i_dst_server_security_group.name})",
+                              i_dst_server_security_group)
             else:
                 args.logger.info("F.10 Destination OpenStack server matching security group not found and gets created.")
                 i_dst_server_security_group = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
-                                                                    i_src_server_security_group, dst_project)
+                                                                     i_src_server_security_group, dst_project)
                 log_or_assert(args,
-                            f"F.10 Destination OpenStack server security group created ({i_dst_server_security_group.name})",
-                            i_dst_server_security_group)
+                              f"F.10 Destination OpenStack server security group created ({i_dst_server_security_group.name})",
+                              i_dst_server_security_group)
 
             log_or_assert(args,
-                        f"F.11 Destination OpenStack server security group exists ({i_dst_server_security_group.name})",
-                        i_dst_server_security_group)
+                          f"F.11 Destination OpenStack server security group exists ({i_dst_server_security_group.name})",
+                          i_dst_server_security_group)
             dst_server_security_groups.append(i_dst_server_security_group)
         log_or_assert(args,
-                    "F.12 Destination OpenStack server - destination security groups exists",
-                    dst_server_security_groups)
+                      "F.12 Destination OpenStack server - destination security groups exists",
+                      dst_server_security_groups)
     else:
         args.logger.info("F.10 Source OpenStack server does not have any security groups linked.")
 
     return dst_server_security_groups
 
+
 def get_server_block_device_mapping(args, server_volume_attachment, server_volume, server_root_device_name):
     """ return server block device mapping item """
     return {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
@@ -502,7 +510,7 @@ def get_server_block_device_mapping(args, server_volume_attachment, server_volum
 
 def create_server_block_device_mappings(args, src_ostack_conn, src_server, source_rbd_images):
     """ create description how are block devices connected to (src/dst) VM server """
-    server_block_device_mappings = [ ]
+    server_block_device_mappings = []
     # schema: [ {}, ... ]
     # where {} is following dict
     # { 'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image', 'volume_attachment_id': <>, 'volume_id': <>,
@@ -520,8 +528,7 @@ def create_server_block_device_mappings(args, src_ostack_conn, src_server, sourc
     src_server_volume_attachments = tuple(src_ostack_conn.compute.volume_attachments(src_server.id))
     args.logger.debug(f"F.21 Source OpenStack server - volume attachments received {src_server_volume_attachments}")
 
-    src_ceph_ephemeral_rbd_image = None
-    if src_server_root_device_name in [ i_source_server_attachment.device for i_source_server_attachment in src_server_volume_attachments ]:
+    if src_server_root_device_name in [i_source_server_attachment.device for i_source_server_attachment in src_server_volume_attachments]:
         args.logger.info("F.22 Source OpenStack server - one of attached volume is attached as the root partition")
 
         # populate server_block_device_mappings
@@ -549,7 +556,7 @@ def create_server_block_device_mappings(args, src_ostack_conn, src_server, sourc
                           src_ceph_ephemeral_rbd_image_size)
 
             # populate server_block_device_mappings
-            ## initial disk
+            # initial disk
             server_block_device_mappings.append({'source': {'block_storage_type': 'ceph-rbd-image',
                                                             'volume_id': src_ceph_ephemeral_rbd_image,
                                                             'ceph_pool_name': args.source_ceph_ephemeral_pool_name,
@@ -564,7 +571,7 @@ def create_server_block_device_mappings(args, src_ostack_conn, src_server, sourc
                                                                  'device_name': os.path.basename(src_server_root_device_name),
                                                                  'volume_bootable': True}})
 
-            ## other disks attached to VM
+            # other disks attached to VM
             for i_source_server_volume_attachment in src_server_volume_attachments:
                 i_server_volume = src_ostack_conn.block_storage.find_volume(i_source_server_volume_attachment.volume_id)
                 server_block_device_mappings.append(get_server_block_device_mapping(args,
@@ -581,6 +588,7 @@ def create_server_block_device_mappings(args, src_ostack_conn, src_server, sourc
 
     return server_block_device_mappings
 
+
 def create_dst_server_volumes_update_block_device_mappings(args, server_block_device_mappings, dst_ostack_conn, destination_image):
     """ create destination cloud volumes and final destination server to block storage mappings """
     out_server_block_device_mappings = copy.deepcopy(server_block_device_mappings)
@@ -613,6 +621,7 @@ def create_dst_server_volumes_update_block_device_mappings(args, server_block_de
                       i_dst_server_block_device_mapping['destination']['volume_id'])
     return out_server_block_device_mappings
 
+
 def describe_server_network_connection(args, dst_ostack_conn, dst_project, netaddr_dict):
     """ create ostack server to network connection via network id or fixed-ip
         retults in single dictionary fed to conn.compute.create_server(...networks=[ <>, ...])
@@ -632,7 +641,7 @@ def describe_server_network_connection(args, dst_ostack_conn, dst_project, netad
                                      project=dst_project, network=dst_network)
     if dst_port_list and len(dst_port_list) == 1:
         dst_port = dst_port_list[0]
-        args.logger.debug(f"{func_name}() Reusing already existing ostack port. " \
+        args.logger.debug(f"{func_name}() Reusing already existing ostack port. "
                           f"(mac: {src_port.mac_address}, ip: {src_port_ip}, desc: ~ {src_port.id}")
     else:
         try:
@@ -648,13 +657,13 @@ def describe_server_network_connection(args, dst_ostack_conn, dst_project, netad
                                                            network_id=dst_network.id,
                                                            mac_address=src_port.mac_address,
                                                            fixed_ips=[dst_port_fixed_ip])
-        except Exception as ex:
+        except Exception:
             args.logger.error(f"{func_name}() throws exception while creating an ostack port.",
                               exc_info=True)
     if dst_port:
         return {'port': dst_port.id}
 
-    args.logger.warning(f"{func_name}() Creation of dedicated network port failed! " \
+    args.logger.warning(f"{func_name}() Creation of dedicated network port failed! "
                         f"Migrated VM will not have same internal IP address / MAC address. {msg_suffix}")
     return {'uuid': dst_network.id}
 
@@ -664,18 +673,18 @@ def create_dst_server(args, src_server, dst_ostack_conn, dst_project, flavor, ke
     # Note: argument network is not valid anymore, use networks
     server_args = {'name': get_dst_resource_name(args, src_server.name),
                    'flavorRef': flavor.id,
-                   'block_device_mapping_v2': [ {'source_type': 'volume',
-                                                 'destination_type': 'volume',
-                                                 'uuid': i_block_device_mapping['destination']['volume_id'],
-                                                 'device_name': i_block_device_mapping['destination']['device_name'],
-                                                 'boot_index': 0 if i_block_device_mapping['destination']['volume_bootable'] else None}
-                                                 for i_block_device_mapping in block_device_mappings ],
+                   'block_device_mapping_v2': [{'source_type': 'volume',
+                                                'destination_type': 'volume',
+                                                'uuid': i_block_device_mapping['destination']['volume_id'],
+                                                'device_name': i_block_device_mapping['destination']['device_name'],
+                                                'boot_index': 0 if i_block_device_mapping['destination']['volume_bootable'] else None}
+                                               for i_block_device_mapping in block_device_mappings],
                    'boot_volume': block_device_mappings[0]['destination']['volume_id'],
                    'key_name': keypair["name"],
-                   'networks': [ describe_server_network_connection(args,
-                                                                    dst_ostack_conn,
-                                                                    dst_project,
-                                                                    i_netaddr) for i_netaddr in server_network_addresses ]}
+                   'networks': [describe_server_network_connection(args,
+                                                                   dst_ostack_conn,
+                                                                   dst_project,
+                                                                   i_netaddr) for i_netaddr in server_network_addresses]}
     log_or_assert(args,
                   "F.35 Destination OpenStack server arguments are generated with valid block-device-mapping",
                   server_args['block_device_mapping_v2'], locals())
@@ -693,10 +702,12 @@ def create_dst_server(args, src_server, dst_ostack_conn, dst_project, flavor, ke
                   server.status == 'ACTIVE', locals())
     return server
 
+
 def get_ostack_objstore_containers(ostack_connection):
     """ receive objstore containers """
     return list(ostack_connection.object_store.containers())
 
+
 def find_ostack_port(ostack_connection, mac_address, ip_address, description_substr='', project=None, network=None, device=None):
     """ find openstack port and narrow down selection with MAC, IP and port description """
     query_ports_args = {}
@@ -707,9 +718,10 @@ def find_ostack_port(ostack_connection, mac_address, ip_address, description_sub
     if device:
         query_ports_args['device_id'] = device.id
     project_ports = ostack_connection.network.ports(**query_ports_args)
-    return [i_port for i_port in project_ports if i_port.mac_address==mac_address and \
-                                                  description_substr in i_port.description and \
-                                                  ip_address in [i_addr.get('ip_address') for i_addr in i_port.fixed_ips]]
+    return [i_port for i_port in project_ports if i_port.mac_address == mac_address and
+            description_substr in i_port.description and
+            ip_address in [i_addr.get('ip_address') for i_addr in i_port.fixed_ips]]
+
 
 def server_detect_floating_address(server):
     """ return True if server has attached floating IP address otherwise False """
@@ -719,6 +731,7 @@ def server_detect_floating_address(server):
                 return True
     return False
 
+
 def get_server_floating_ip_port(ostack_connection, server):
     """ set server's port where to put FIP, otherwise None """
     for i_port in ostack_connection.network.ports(device_id=server.id):
@@ -728,6 +741,7 @@ def get_server_floating_ip_port(ostack_connection, server):
                     return i_port
     return None
 
+
 def get_server_floating_ip_properties(server):
     """ return VM FIP properties (IP type, internal IP addr, FIP addr, MAC addr) """
     int_address_data = {}
@@ -738,11 +752,11 @@ def get_server_floating_ip_properties(server):
             for i_field_name in ('addr', 'version', 'OS-EXT-IPS-MAC:mac_addr',):
                 int_address_data[f"{int_ip_type}/{i_field_name}"] = i_addr_field.get(i_field_name, '?')
         if "fixed/version" in int_address_data and \
-           "floating/version" in int_address_data and \
-           "fixed/OS-EXT-IPS-MAC:mac_addr" in int_address_data and \
-           "floating/OS-EXT-IPS-MAC:mac_addr" in int_address_data and \
-           int_address_data["fixed/version"] == int_address_data["floating/version"] and \
-           int_address_data["fixed/OS-EXT-IPS-MAC:mac_addr"] == int_address_data["floating/OS-EXT-IPS-MAC:mac_addr"]:
+                "floating/version" in int_address_data and \
+                "fixed/OS-EXT-IPS-MAC:mac_addr" in int_address_data and \
+                "floating/OS-EXT-IPS-MAC:mac_addr" in int_address_data and \
+                int_address_data["fixed/version"] == int_address_data["floating/version"] and \
+                int_address_data["fixed/OS-EXT-IPS-MAC:mac_addr"] == int_address_data["floating/OS-EXT-IPS-MAC:mac_addr"]:
             int_address_data["fixed/network-name"] = i_net_name
             return int_address_data
     return {}
@@ -828,12 +842,11 @@ def compare_quota_values(value_1, value_2):
 
     Treats `None` and `-1` values as unlimited, i.e. always bigger than any other limited value.
     """
-    # treat None as unlimited quota, i.e. -1
-    val_1 = -1 if value_1 is None else value_1
-    val_2 = -1 if value_2 is None else value_2
-    if val_1 == val_2:
-        return 0
-    if val_1 > val_2 or val_1 == -1:
+    # treat None and -1 as unlimited quota
+    val_1 = math.inf if value_1 is None or value_1 == -1 else value_1
+    val_2 = math.inf if value_2 is None or value_2 == -1 else value_2
+    if val_1 > val_2:
         return 1
-    if val_1 < val_2 or val_2 == -1:
+    if val_1 < val_2:
         return -1
+    return 0
diff --git a/ci/project-flavor-migration-check.py b/ci/project-flavor-migration-check.py
index 7efeab1..6e93dd3 100755
--- a/ci/project-flavor-migration-check.py
+++ b/ci/project-flavor-migration-check.py
@@ -26,6 +26,7 @@ import yaml
 import lib
 import olib
 
+
 def main(args):
     """ main project migration loop """
     # connect to source cloud
@@ -53,21 +54,20 @@ def main(args):
     source_project_conn = lib.get_ostack_connection(source_migrator_openrc | {'OS_PROJECT_NAME': source_project.name})
 
     # get source/destination entities in the project
-    source_project_servers = lib.get_ostack_project_servers(source_project_conn, source_project)
+    source_project_servers = lib.get_ostack_project_servers(source_project_conn)
     args.logger.info("E.01 Source OpenStack cloud servers received")
     lib.assert_entity_ownership(source_project_servers, source_project)
     args.logger.info(f"E.02 Source OpenStack cloud project has {len(source_project_servers)} servers.")
 
     args.logger.info("F.00 Main looping started")
-    args.logger.info(f"F.00 Source VM servers: {[ i_source_server.name for i_source_server in source_project_servers]}")
-
+    args.logger.info(f"F.00 Source VM servers: {[i_source_server.name for i_source_server in source_project_servers]}")
 
     source_flavor_names = []
     destination_expected_flavor_names = []
     for i_source_server in source_project_servers:
         i_source_server_detail = source_project_conn.compute.find_server(i_source_server.id)
 
-        args.logger.info(f"F.01 server evaluation started - name:{i_source_server_detail.name}, " \
+        args.logger.info(f"F.01 server evaluation started - name:{i_source_server_detail.name}, "
                          f"flavor: {i_source_server_detail.flavor.name}, addresses: {i_source_server_detail.addresses}, status: {i_source_server_detail.status}")
 
         # flavor detection
@@ -89,7 +89,7 @@ def main(args):
             continue
         destination_expected_nonpublic_flavor_names.append(i_dst_flavor.name)
 
-    cld_entities_structure = {'acls' : {'flavors': destination_expected_nonpublic_flavor_names}}
+    cld_entities_structure = {'acls': {'flavors': destination_expected_nonpublic_flavor_names}}
     print(yaml.dump(cld_entities_structure))
 
 
diff --git a/ci/project-migrator.py b/ci/project-migrator.py
index 41a18eb..9dbc883 100755
--- a/ci/project-migrator.py
+++ b/ci/project-migrator.py
@@ -30,13 +30,13 @@ Usage example:
 
 import argparse
 import logging
-import pprint
 import sys
 
-import lib
 import clib
+import lib
 import olib
 
+
 def main(args):
     """ main project migration loop """
     # connect to source cloud
@@ -75,22 +75,22 @@ def main(args):
     source_project_conn = lib.get_ostack_connection(source_migrator_openrc | {'OS_PROJECT_NAME': source_project.name})
     destination_project_conn = lib.get_ostack_connection(destination_migrator_openrc | {'OS_PROJECT_NAME': destination_project.name})
 
-    args.logger.info(f"C.01 Source and destination project quotas comparison:")
+    args.logger.info("C.01 Source and destination project quotas comparison:")
     olib.compare_and_log_projects_quotas(args, "C.01", source_project_conn, source_project.id, destination_project_conn, destination_project.id)
 
     # connect to migrator node
-    reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
-                                                                  args.ceph_migrator_sshkeyfile.name, 'uname -a')
+    reply_stdout, _, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
+                                                       args.ceph_migrator_sshkeyfile.name, 'uname -a')
     lib.log_or_assert(args, "D.01 Migrator host is reachable", 'Linux' in reply_stdout and reply_ecode == 0)
 
-    reply_stdout, reply_stderr, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
-                                                                  args.ceph_migrator_sshkeyfile.name,
-                                                                  '/root/migrator/ceph-accessible.sh')
+    reply_stdout, _, reply_ecode = lib.remote_cmd_exec(args.ceph_migrator_host, args.ceph_migrator_user,
+                                                       args.ceph_migrator_sshkeyfile.name,
+                                                       '/root/migrator/ceph-accessible.sh')
     lib.log_or_assert(args, "D.02 Ceph is available from the migrator host", reply_ecode == 0)
 
     source_rbd_images = {args.source_ceph_ephemeral_pool_name: None,
                          args.source_ceph_cinder_pool_name: None}
-    for i_pool_name in source_rbd_images.keys():
+    for i_pool_name in source_rbd_images:
         source_rbd_images[i_pool_name] = clib.ceph_rbd_images_list(args, i_pool_name)
         lib.log_or_assert(args, f"D.03 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name])
 
@@ -99,18 +99,18 @@ def main(args):
 
     source_objstore_containers = olib.get_ostack_objstore_containers(source_project_conn)
     if source_objstore_containers:
-        args.logger.warning("D.10 Source OpenStack cloud project contains some object-store containers. " \
+        args.logger.warning("D.10 Source OpenStack cloud project contains some object-store containers. "
                             f"Manual objstore data copy is required. Detected containers:{source_objstore_containers}")
     else:
         args.logger.info("D.10 Source OpenStack cloud project has no object-store containers")
 
     # get source/destination entities in the project
-    source_project_servers = lib.get_ostack_project_servers(source_project_conn, source_project)
+    source_project_servers = lib.get_ostack_project_servers(source_project_conn)
     args.logger.info("E.01 Source OpenStack cloud servers received")
     lib.assert_entity_ownership(source_project_servers, source_project)
     args.logger.info(f"E.02 Source OpenStack cloud project has {len(source_project_servers)} servers.")
 
-    destination_project_servers = lib.get_ostack_project_servers(destination_project_conn, destination_project)
+    destination_project_servers = lib.get_ostack_project_servers(destination_project_conn)
     args.logger.info("E.10 Destination OpenStack cloud servers received")
     lib.assert_entity_ownership(destination_project_servers, destination_project)
     args.logger.info(f"E.11 Destination OpenStack cloud project has {len(destination_project_servers)} servers.")
@@ -127,7 +127,7 @@ def main(args):
     if args.dry_run:
         args.logger.info("Exiting before first cloud modification operation as in dry-run mode.")
         if args.debugging:
-            import IPython # on-purpose lazy import
+            import IPython  # on-purpose lazy import
             IPython.embed()
         return
 
@@ -137,7 +137,7 @@ def main(args):
     args.logger.info("E.40 Destination OpenStack project security groups duplicated")
 
     args.logger.info("F.00 Main looping started")
-    args.logger.info(f"F.00 Source VM servers: {[ i_source_server.name for i_source_server in source_project_servers]}")
+    args.logger.info(f"F.00 Source VM servers: {[i_source_server.name for i_source_server in source_project_servers]}")
     for i_source_server in source_project_servers:
         i_source_server_detail = source_project_conn.compute.find_server(i_source_server.id)
         i_source_server_fip_properties = olib.get_server_floating_ip_properties(i_source_server_detail)
@@ -155,11 +155,11 @@ def main(args):
             args.logger.info(f"F.01 server migration skipped - name:{i_source_server_detail.name} as equivalent VM exists in destination cloud (name: {i_destination_server_detail.name})")
             continue
 
-        args.logger.info(f"F.01 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, " \
-                         f"keypair: {i_source_server_detail.key_name}, flavor: {i_source_server_detail.flavor}, " \
-                         f"sec-groups:{i_source_server_detail.security_groups}, root_device_name: {i_source_server_detail.root_device_name}, " \
-                         f"block_device_mapping: {i_source_server_detail.block_device_mapping}, " \
-                         f"attached-volumes: {i_source_server_detail.attached_volumes}" \
+        args.logger.info(f"F.01 server migration started - name:{i_source_server_detail.name}, id:{i_source_server_detail.id}, "
+                         f"keypair: {i_source_server_detail.key_name}, flavor: {i_source_server_detail.flavor}, "
+                         f"sec-groups:{i_source_server_detail.security_groups}, root_device_name: {i_source_server_detail.root_device_name}, "
+                         f"block_device_mapping: {i_source_server_detail.block_device_mapping}, "
+                         f"attached-volumes: {i_source_server_detail.attached_volumes}"
                          f"addresses: {i_source_server_detail.addresses}")
 
         # network/subnet/router detection & creation
@@ -211,12 +211,12 @@ def main(args):
 
         # start server in source cloud (if necessary), wait for VM being back in the same state as at the beginning
         if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status and \
-           not args.source_servers_left_shutoff:
+                not args.source_servers_left_shutoff:
             if i_source_server_detail.status == 'ACTIVE':
                 source_project_conn.compute.start_server(i_source_server_detail)
                 args.logger.info(f"F.34 Source OpenStack VM server (name:{i_source_server_detail.name}) requested to start")
             else:
-                args.logger.warning(f"F.34 Source OpenStack VM server (name:{i_source_server_detail.name}) is not in expected state, " \
+                args.logger.warning(f"F.34 Source OpenStack VM server (name:{i_source_server_detail.name}) is not in expected state, "
                                     f"but migrator does not know how to move to {i_source_server_detail.status} state")
 
         # start server in destination cloud
@@ -230,8 +230,9 @@ def main(args):
                                                       i_destination_server_network_addresses)
 
         # add security groups to the destination server (if missing)
-        for i_destination_server_security_group_id, i_destination_server_security_group_name in {(i_destination_server_security_group.id, i_destination_server_security_group.name) for i_destination_server_security_group in i_destination_server_security_groups}:
-            if {'name': i_destination_server_security_group_name } not in i_destination_server.security_groups:
+        dst_security_groups = {(i_destination_server_security_group.id, i_destination_server_security_group.name) for i_destination_server_security_group in i_destination_server_security_groups}
+        for i_destination_server_security_group_id, i_destination_server_security_group_name in dst_security_groups:
+            if {'name': i_destination_server_security_group_name} not in i_destination_server.security_groups:
                 destination_project_conn.add_server_security_groups(i_destination_server.id, i_destination_server_security_group_id)
         if args.migrate_fip_addresses and i_source_server_fip_properties:
             # add FIP as source VM has it
@@ -253,18 +254,18 @@ def main(args):
         args.logger.info(f"F.41 Source OpenStack server name:{i_source_server_detail.name} migrated into destination one name:{i_destination_server.name} id:{i_destination_server.id}")
 
         if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status and \
-           not args.source_servers_left_shutoff:
+                not args.source_servers_left_shutoff:
             if i_source_server_detail.status == 'ACTIVE':
                 if lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, i_source_server_detail.status) != i_source_server_detail.status:
                     args.logger.warning(f"F.42 Source OpenStack VM server has not become {i_source_server_detail.status} yet, trying again...")
                     source_project_conn.compute.start_server(i_source_server_detail)
                     args.logger.info(f"F.42 Source OpenStack VM server (name:{i_source_server_detail.name}) requested to start again")
                 if lib.wait_for_ostack_server_status(source_project_conn, i_source_server.id, i_source_server_detail.status) != i_source_server_detail.status:
-                    args.logger.error(f"F.42 Source OpenStack VM server (name:{i_source_server_detail.name}) has not become " \
-                                      f"{i_source_server_detail.status} yet (after second start). " \
+                    args.logger.error(f"F.42 Source OpenStack VM server (name:{i_source_server_detail.name}) has not become "
+                                      f"{i_source_server_detail.status} yet (after second start). "
                                       f"This situation is no longer asserted but needs manual admin inspection.")
             else:
-                args.logger.error(f"F.42 Source OpenStack VM server (name:{i_source_server_detail.name}) is not in proper state, " \
+                args.logger.error(f"F.42 Source OpenStack VM server (name:{i_source_server_detail.name}) is not in proper state, "
                                   f"but migrator does not know how to move to {i_source_server_detail.status} state")
         else:
             args.logger.info(f"F.42 Source OpenStack VM server (name:{i_source_server_detail.name}) back in expected state {i_source_server_detail.status}.")
@@ -278,8 +279,8 @@ def main(args):
                 args.logger.info(f"H.01 Source volume migration skipped as does not exist (name:{i_source_volume_name})")
                 continue
             if i_source_volume.status != 'available':
-                args.logger.info(f"H.02 Source volume migration skipped as it is not in state available (name:{i_source_volume_name}, state:{i_source_volume.status}). " \
-                                "Note in-use volumes are being migrated in VM server migration part.")
+                args.logger.info(f"H.02 Source volume migration skipped as it is not in state available (name:{i_source_volume_name}, state:{i_source_volume.status}). "
+                                 "Note in-use volumes are being migrated in VM server migration part.")
                 continue
 
             i_dst_volume = destination_project_conn.block_storage.create_volume(name=lib.get_dst_resource_name(args, i_source_volume.name),
@@ -288,20 +289,20 @@ def main(args):
                                                                                                                       i_source_volume.description,
                                                                                                                       i_source_volume.id))
             lib.log_or_assert(args,
-                            f"H.03 Destination OpenStack volume created (name:{i_dst_volume.name}, id:{i_dst_volume.id})", i_dst_volume)
+                              f"H.03 Destination OpenStack volume created (name:{i_dst_volume.name}, id:{i_dst_volume.id})", i_dst_volume)
             i_dst_volume_status = lib.wait_for_ostack_volume_status(destination_project_conn, i_dst_volume.id, 'available')
             lib.log_or_assert(args,
-                            f"H.04 Destination OpenStack volume available (name:{i_dst_volume.name}, id:{i_dst_volume.id})",
-                            i_dst_volume_status == 'available')
-            i_volume_mapping = {'source':      {'ceph_pool_name': args.source_ceph_cinder_pool_name,
-                                                'ceph_rbd_image_name': i_source_volume.id},
+                              f"H.04 Destination OpenStack volume available (name:{i_dst_volume.name}, id:{i_dst_volume.id})",
+                              i_dst_volume_status == 'available')
+            i_volume_mapping = {'source': {'ceph_pool_name': args.source_ceph_cinder_pool_name,
+                                           'ceph_rbd_image_name': i_source_volume.id},
                                 'destination': {'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                                 'volume_id': i_dst_volume.id}}
             clib.migrate_rbd_image(args, i_volume_mapping)
             i_dst_volume_detail = destination_project_conn.block_storage.find_volume(i_dst_volume.id)
             lib.log_or_assert(args,
-                            f"H.05 Destination OpenStack volume available (name:{i_dst_volume_detail.name}, id:{i_dst_volume_detail.id})",
-                            i_dst_volume_detail.status == 'available')
+                              f"H.05 Destination OpenStack volume available (name:{i_dst_volume_detail.name}, id:{i_dst_volume_detail.id})",
+                              i_dst_volume_detail.status == 'available')
 
 
 # main() call (argument parsing)
-- 
GitLab