""" OpenStack project migrator library """

import json
import re
import pprint
import time

import xmltodict
import paramiko
import openstack
from keystoneauth1.identity import v3
from keystoneauth1 import session


def wait_for_keypress(msg="Press Enter to continue..."):
    """ wait for an interactive keypress, return the entered text """
    return input(msg)

def normalize_servers(servers):
    """ list of server names/IDs separated by space of comma returned as list of strings or None """
    if isinstance(servers, str) and servers:
        return servers.replace(","," ").split()
    return None
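
# Illustrative examples (hypothetical values):
#   normalize_servers("vm-1,vm-2 vm-3")  ->  ['vm-1', 'vm-2', 'vm-3']
#   normalize_servers("")                ->  None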

def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
    """ transform input dictionary and filter its keys with allowed_keys and denied_keys sequences """
    int_allowed_keys = allowed_keys if allowed_keys else tuple()
    int_denied_keys = denied_keys if denied_keys else tuple()
    if int_allowed_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key in int_allowed_keys}
    if int_denied_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key not in int_denied_keys}
    return dict_data
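
# Illustrative examples: allowed_keys takes precedence over denied_keys:
#   trim_dict({'a': 1, 'b': 2}, allowed_keys=('a',))  ->  {'a': 1}
#   trim_dict({'a': 1, 'b': 2}, denied_keys=('a',))   ->  {'b': 2}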


def get_destination_network(source_network):
    """ LUT for networks """
    network_mapping = {
        # shared
        "78-128-250-pers-proj-net" :  "internal-ipv4-general-private",
        "147-251-115-pers-proj-net" : "internal-ipv4-general-private",
        "public-muni-v6-432" :        "external-ipv6-general-public",
        # external
        "public-muni-147-251-21-GROUP": "external-ipv4-general-public",
        "public-cesnet-78-128-250-PERSONAL": "external-ipv4-general-public",
        "public-cesnet-78-128-251-GROUP": "external-ipv4-general-public",
        "provider-public-cerit-sc-147-251-253": "external-ipv4-general-public",
        "public-muni-147-251-115-PERSONAL": "external-ipv4-general-public",
        "public-muni-147-251-124-GROUP": "external-ipv4-general-public",
        "public-cesnet-195-113-167-GROUP": "external-ipv4-general-public",
        "public-muni-147-251-11-128-254": "external-ipv4-general-public",
        "public-muni-CERIT-FI-147-251-88-132-254": "external-ipv4-general-public",
        "public-muni-CSIRT-MU-217-69-96-64-240": "external-ipv4-general-public",
        "public-muni-csirt-147-251-125-16-31": "external-ipv4-general-public",
        "provider-public-cerit-sc-147-251-254": "external-ipv4-general-public",
        # group project internal network
        "group-project-network": "group-project-network"
        }
    return network_mapping.get(source_network)

def get_destination_subnet(source_subnet):
    """ LUT for networks """
    subnet_mapping = {
        # TODO: shared

        # group project internal network
        "group-project-network-subnet": "group-project-network-subnet"
        }
    return subnet_mapping.get(source_subnet)

def get_destination_router(source_router):
    """ LUT for networks """
    router_mapping = {
        # TODO: shared

        # group project internal network
        "router": "group-project-router"
        }
    return router_mapping.get(source_router)


def get_destination_flavor(source_flavor):
    """ LUT for flavors """
    flavor_mapping = {
        #'eph.16cores-60ram' # no need to handle, not used by any G1 project
        #'eph.8cores-30ram': 'c2.8core-30ram' # no need to handle, not used by any G1 project
        #'eph.8cores-60ram': 'c3.8core-60ram' # no need to handle, not used by any G1 project
        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram', # disk size differs: 80 GB in G2 instead of 35
        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram', # ok
        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram', # disk size differs: 80 GB in G2 instead of 35
        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram', # ok
        #'hdn.medium-ssd-ephem': # no need to handle, not used by any G1 project
        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500', # not in G2 and needed
        'hpc.16core-128ram': 'c3.16core-128ram', # not in G2 and needed
        'hpc.16core-256ram': 'c3.16core-256ram', # not in G2 and needed
        'hpc.16core-32ram': 'c2.16core-30ram', # ok
        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk', # not in G2 and needed
        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd', # not in G2 and needed
        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram', # ok
        'hpc.18core-48ram': '', # not in G2 and needed
        'hpc.18core-64ram-dukan': 'c2.24core-60ram', # no need to handle
        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd', # no need to handle
        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500', # not in G2 and needed
        'hpc.30core-256ram': 'c3.30core-256ram', # not in G2 and needed
        'hpc.30core-64ram': 'c3.32core-60ram', # G2 has 2 more CPUs
        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram', # ok
        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # ok
        'hpc.4core-4ram': 'e1.medium', # no need to handle
        'hpc.8core-128ram': 'c3.8core-128ram', # not in G2 and needed
        'hpc.8core-16ram': 'c2.8core-16ram', # ok
        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram', # no need to handle
        'hpc.8core-256ram': None, # no need to handle
        'hpc.8core-32ram-dukan': 'c2.8core-30ram', # no need to handle
        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram', # ok
        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram', # ok
        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram', # ok
        'hpc.8core-8ram': 'e1.1xlarge', # G2 disk is 20 GB smaller
        'hpc.hdh-ephem': 'hpc.hdh', # missing in G2 and needed
        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500', # not needed
        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram', # not needed
        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full', # not needed
        'hpc.large': 'g2.3xlarge', # ok
        'hpc.medium': 'c2.8core-30ram', # ok
        'hpc.small': 'c2.4core-16ram', # ok
        'hpc.xlarge': None, # not in G2
        'hpc.xlarge-memory': 'c3.xlarge-memory', # not in G2
        'standard.16core-32ram': 'g2.2xlarge', # ok
        'standard.20core-128ram': 'e1.20core-128ram', # not needed
        'standard.20core-256ram': 'e1.20core-256ram', # not in G2
        'standard.2core-16ram': 'c3.2core-16ram', # ok
        'standard.large': 'e1.large', # ok, alternatively c3.4core-8ram
        'standard.medium': 'e1.medium', # G2 has 2 more CPUs
        'standard.memory': 'c3.2core-30ram', # alternatively also c2.2core-30ram
        'standard.one-to-many': 'c3.24core-60ram', # G2 has 4 more CPUs
        'standard.small': 'e1.small', # 2x more RAM and CPUs in G2
        'standard.tiny': 'e1.tiny', # 2x more RAM and CPUs in G2
        'standard.xlarge': 'e1.2xlarge', # G2 has 4 more CPUs
        'standard.xlarge-cpu': 'e1.2xlarge', # ok
        'standard.xxlarge': 'c2.8core-30ram', # ok
        'standard.xxxlarge': 'c3.8core-60ram'  # ok
    }
    assert source_flavor in flavor_mapping, f"Source flavor ({source_flavor}) can be mapped to a destination flavor"
    assert flavor_mapping[source_flavor], f"Source flavor ({source_flavor}) has a valid destination mapping"
    return flavor_mapping[source_flavor]

def normalize_table_data_field(data_field):
    """ normalize single data field (single data insert) """
    int_dict = {}
    i_name_key = '@name'
    for i_data_field_item in data_field:
        i_value_key = [ i_k for i_k in i_data_field_item.keys() if i_k != i_name_key][0]
        int_dict[i_data_field_item[i_name_key]] = i_data_field_item[i_value_key]
    return int_dict

def normalize_table_data(data):
    """ normalize whole table data """
    int_list = []
    for i_data_field in data:
        int_list.append(normalize_table_data_field(i_data_field['field']))
    return int_list
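
# Illustrative example: xmltodict parses a `mysqldump --xml` row such as
#   <row><field name="name">mykey</field><field name="user_id">abc</field></row>
# into {'field': [{'@name': 'name', '#text': 'mykey'},
#                 {'@name': 'user_id', '#text': 'abc'}]}, which
# normalize_table_data() flattens to [{'name': 'mykey', 'user_id': 'abc'}]
# (field names and values here are hypothetical).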

def get_migrated_resource_name(args, name):
    """ translate original name to destination one """
    return f"{args.destination_entity_prefix}{name}"

def get_openrc(file_handle):
    """ parse and return OpenRC file """
    openrc_vars = {}

    for line in file_handle:
        match = re.match(r'^export (\w+)=(.+)$', line.strip())
        if match:
            openrc_vars[match.group(1)] = match.group(2).strip('"')
    return openrc_vars
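
# Illustrative example (URL is hypothetical): a line such as
#   export OS_AUTH_URL="https://identity.example.org/v3"
# is parsed into {'OS_AUTH_URL': 'https://identity.example.org/v3'}.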


def get_ostack_connection(openrc_vars):
    """ create and return an OpenStack SDK connection built from parsed OpenRC variables """
    auth_args = {
        'auth_url': openrc_vars.get('OS_AUTH_URL'),
        'username': openrc_vars.get('OS_USERNAME'),
        'password': openrc_vars.get('OS_PASSWORD'),
        'project_name': openrc_vars.get('OS_PROJECT_NAME'),
        'project_domain_name': openrc_vars.get('OS_PROJECT_DOMAIN_NAME'),
        'user_domain_name': openrc_vars.get('OS_USER_DOMAIN_NAME'),
        'project_domain_id': openrc_vars.get('OS_PROJECT_DOMAIN_ID'),
        'user_domain_id': openrc_vars.get('OS_USER_DOMAIN_ID'),
    }
    connection_args = {
        'compute_api_version': openrc_vars.get('OS_COMPUTE_API_VERSION'),
        'identity_api_version': openrc_vars.get('OS_IDENTITY_API_VERSION'),
        'volume_api_version': openrc_vars.get('OS_VOLUME_API_VERSION')
    }
    auth = v3.Password(**auth_args)
    ostack_sess = session.Session(auth=auth)
    ostack_conn = openstack.connection.Connection(session=ostack_sess, **connection_args)
    return ostack_conn
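
# Usage sketch (file name is hypothetical):
#   with open('project-openrc.sh', encoding='utf-8') as i_file:
#       ostack_conn = get_ostack_connection(get_openrc(i_file))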

def get_ostack_project(ostack_connection, project_name):
    """ find an OpenStack project by name, return it or None """
    project = None
    for i_project in ostack_connection.list_projects():
        if i_project.name == project_name:
            project = i_project
    return project

def get_ostack_project_type(ostack_connection, project):
    """ detect project type, return 'group' / 'personal' / 'other' """
    if project.name in [ i_user.name for i_user in ostack_connection.list_users() ]:
        return "personal"
    return "group"

def get_ostack_project_security_groups(ostack_connection, project=None):
    """ return the project's security groups as a list, or all security groups when project is None """
    security_groups = []
    if project:
        for i_security_group in ostack_connection.network.security_groups():
            if i_security_group.tenant_id == project.id:
                security_groups.append(i_security_group)
        return security_groups
    return tuple(ostack_connection.network.security_groups())

def get_ostack_project_keypairs(ostack_connection, project=None):
    return ostack_connection.list_keypairs()

def get_ostack_project_keypairs2(ostack_connection, project=None):
    return list(ostack_connection.compute.keypairs())


def get_ostack_project_servers(ostack_connection, project=None):
    return tuple(ostack_connection.compute.servers())

def get_ostack_project_volumes(ostack_connection, project=None):
    return ostack_connection.block_store.volumes()

def get_ostack_project_flavors(ostack_connection, project=None):
    return tuple(ostack_connection.compute.flavors())

def get_resource_details(resources):
    """ inspect resources """
    for i_resource in resources:
        print(i_resource)
        pprint.pprint(i_resource)

def remote_cmd_exec(hostname, username, key_filename, command):
    """ executes remote command, returs stdout, stderr and exit-code or Exception """
    # Create SSH client
    ssh_client = paramiko.SSHClient()
    # Automatically add untrusted hosts
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ecode = None

    try:
        # Connect to the remote host
        pkey = paramiko.RSAKey.from_private_key_file(key_filename)
        ssh_client.connect(hostname, username=username, pkey=pkey, look_for_keys=False)

        # Execute the command, read the output and close
        stdin, stdout, stderr = ssh_client.exec_command(command)
        output = stdout.read().decode().strip()
        error = stderr.read().decode().strip()
        ecode = stdout.channel.recv_exit_status()
        ssh_client.close()

        return output, error, ecode

    except Exception as e:
        print("Error:", e)
        return None, None, e
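
# Usage sketch (hostname and key path are hypothetical):
#   out, err, ecode = remote_cmd_exec('migrator.example.org', 'root',
#                                     '/root/.ssh/id_rsa', 'uname -a')
# On success ecode is the remote exit status; on any exception the caller
# receives (None, None, <the exception>).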

def get_ceph_client_name(args, ceph_src_pool_name, ceph_dst_pool_name=None):
    """ select the Ceph client name based on the destination pool name, falling back to the source pool name """
    int_pool_name = ceph_dst_pool_name if ceph_dst_pool_name else ceph_src_pool_name

    return "client.cinder" if int_pool_name in (args.source_ceph_cinder_pool_name, args.source_ceph_ephemeral_pool_name,) else "client.migrator"

def ceph_rbd_images_list(args, pool_name):
    """ """
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"/root/migrator/ceph-rbd-images-list.sh {pool_name}")
    assert stdout, f"RBD pool ({pool_name}) images received successfully (non-empty RBD list)"
    assert ecode == 0, f"RBD pool ({pool_name}) images received successfully (ecode)"
    return stdout.splitlines()
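
# Usage sketch: the ceph_rbd_image_* helpers below follow the same pattern,
# wrapping the /root/migrator/*.sh scripts on the migrator host; most return
# (stdout-lines, stderr, exit-code) tuples. Pool name below is hypothetical:
#   images = ceph_rbd_images_list(args, 'prod-cinder-volumes')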

def ceph_rbd_image_info(args, pool_name, rbd_image_name):
    """ get ceph RBD image information """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-info.sh {pool_name} {rbd_image_name}")
    return json.loads(stdout), stderr, ecode

def ceph_rbd_image_exists(args, pool_name, rbd_image_name):
    """ detect whether RBD image {pool_name}/{rbd_image_name} exists """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-exists.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_delete(args, pool_name, rbd_image_name):
    """ delete RBD image {pool_name}/{rbd_image_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-delete.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_flatten(args, pool_name, rbd_image_name):
    """ flatten RBD image {pool_name}/{rbd_image_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-flatten.sh {pool_name} {rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_snapshot_name,
                         dst_pool_name, dst_rbd_image_name):
    """ clone RBD image {src_pool_name}/{src_rbd_image_name}@{src_rbd_image_snapshot_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
    ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-clone.sh {src_pool_name} {src_rbd_image_name} {src_rbd_image_snapshot_name} {dst_pool_name} {dst_rbd_image_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_copy(args, src_pool_name, src_rbd_image_name, dst_pool_name, dst_rbd_image_name):
    """ copy RBD image {src_pool_name}/{src_rbd_image_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
    ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
    cmd = f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-copy.sh {src_pool_name} {src_rbd_image_name} {dst_pool_name} {dst_rbd_image_name}"
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            cmd)
    return stdout.splitlines(), stderr, ecode


def ceph_rbd_image_snapshot_exists(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ detect whether RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} exists """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-exists.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_snapshot_create(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ create RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-create.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode

def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
    """ delete RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
    ceph_client_name = get_ceph_client_name(args, pool_name)
    stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
                                            args.ceph_migrator_user,
                                            args.ceph_migrator_sshkeyfile.name,
                                            f"CEPH_USER={ceph_client_name} /root/migrator/ceph-rbd-image-snapshot-delete.sh {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
    return stdout.splitlines(), stderr, ecode


def assert_entity_ownership(entities, project):
    """ """
    for i_entity in entities:
        assert i_entity.project_id == project.id, f"Entity belongs to expected project (id: {project.id})"

def get_source_keypairs(args):
    """ """
    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host,
                                                              args.ceph_migrator_user,
                                                              args.ceph_migrator_sshkeyfile.name,
                                                              f"cat {args.source_keypair_xml_dump_file}")
    assert reply_ecode == 0, "Keypairs received"
    table_dictdata = xmltodict.parse(reply_stdout)
    table_data_dictdata = table_dictdata['mysqldump']['database']['table_data']['row']
    return normalize_table_data(table_data_dictdata)

def get_source_keypair(keypairs, keypair_name, user_id):
    """ """
    keypairs_selected = [ i_keypair for i_keypair in keypairs if i_keypair.get("name", "") == keypair_name and i_keypair.get("user_id", "") == user_id ]
    if keypairs_selected:
        return keypairs_selected[0]
    return None

def create_keypair(args, ostack_connection, keypair):
    """ create openstack keypair object """
    return ostack_connection.compute.create_keypair(name=get_migrated_resource_name(args, keypair['name']),
                                                    public_key=keypair['public_key'], type=keypair['type'])

def create_security_group(args, ostack_connection, security_group, project):
    """ create openstack security group """
    int_sg = ostack_connection.network.create_security_group(name=get_migrated_resource_name(args, security_group.name),
                                                             description=security_group.description,
                                                             project_id=project.id)
    #pprint.pprint(int_sg)

    for i_rule in security_group.security_group_rules:
        i_mod_rule = {i_k: i_rule[i_k] for i_k in i_rule if i_k not in ['id', 'project_id', 'tenant_id', 'revision_number', 'updated_at', 'created_at', 'tags', 'standard_attr_id', 'normalized_cidr']}
        i_mod_rule['security_group_id'] = int_sg.id
        i_mod_rule['project_id'] = project.id
        i_mod_rule = {i_k: i_mod_rule[i_k] for i_k in i_mod_rule if i_mod_rule[i_k] is not None}
        #pprint.pprint(i_rule)
        #pprint.pprint(i_mod_rule)
        try:
            ostack_connection.network.create_security_group_rule(**i_mod_rule)
        except openstack.exceptions.ConflictException as ex:
            # TODO: analyze whether the Conflicts we have seen mean that the security group rule IS COMPLETELY identical
            # Alternative solution would be to remove rules after creation and add specific ones
            pass

    return int_sg

def log_or_assert(args, msg, condition, trace_details=None):
    """ log, assert, dump state """
    if not condition:
        with open(args.exception_trace_file, "w") as file:
            file.write(f"{msg}\n{pprint.pformat(trace_details)}\n\n{locals()}\n")
    assert condition, msg
    args.logger.info(msg)
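
# Usage sketch (names are hypothetical): on success the message is logged,
# otherwise trace_details and locals() are dumped before AssertionError:
#   log_or_assert(args, "Destination project detected", dst_project, dst_project)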


def wait_for_ostack_server_status(ostack_connection, server_name_or_id, server_status, timeout=120):
    """ poll server status until it matches server_status or timeout [s] expires, return last observed status """
    int_start_timestamp = time.time()
    int_server = ostack_connection.compute.find_server(server_name_or_id)
    int_server_status = None
    while time.time() <= (int_start_timestamp + timeout):
        int_server_status = ostack_connection.compute.find_server(int_server.id).status
        if int_server_status == server_status:
            break
        # wait before polling again to avoid hammering the API
        time.sleep(1)

    return int_server_status
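
# Usage sketch (server name is hypothetical):
#   status = wait_for_ostack_server_status(ostack_conn, 'vm-1', 'SHUTOFF', timeout=300)
#   assert status == 'SHUTOFF', "Server reached SHUTOFF in time"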

def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=120):
    """ poll volume status until it matches volume_status or timeout [s] expires, return last observed status """
    int_start_timestamp = time.time()
    int_volume = ostack_connection.block_storage.find_volume(volume_name_or_id)
    int_volume_status = None
    while time.time() <= (int_start_timestamp + timeout):
        int_volume_status = ostack_connection.block_storage.find_volume(int_volume.id).status
        if int_volume_status == volume_status:
            break
        # wait before polling again to avoid hammering the API
        time.sleep(1)

    return int_volume_status

def server_detect_floating_address(server):
    """ return True if server has attached floating IP address otherwise False """
    for _, i_ip_details in server.addresses.items():
        for i_ip_detail in i_ip_details:
            if str(i_ip_detail.get('version')) == '4' and i_ip_detail.get('OS-EXT-IPS:type') == 'floating':
                return True
    return False
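
# Illustrative server.addresses structure assumed above (values hypothetical):
#   {'group-project-network': [
#       {'version': 4, 'addr': '192.168.0.10', 'OS-EXT-IPS:type': 'fixed'},
#       {'version': 4, 'addr': '203.0.113.5', 'OS-EXT-IPS:type': 'floating'}]}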

def get_server_floating_ip_port(ostack_connection, server):
    """ set server's port where to put FIP, otherwise None """
    for i_port in ostack_connection.network.ports(device_id=server.id):
        for i_port_ip in i_port.fixed_ips:
            for i_ip_prefix in ('192.', '10.', '172.'):
                if str(i_port_ip.get('ip_address')).startswith(i_ip_prefix):
                    return i_port
    return None