""" OpenStack project migrator library """

import copy
import re
import pprint
import time
import os
import os.path
import xmltodict
import paramiko
import openstack
from keystoneauth1.identity import v3
from keystoneauth1 import session
def wait_for_keypress(msg="Press Enter to continue..."):
    """ wait for the user to press Enter """
    return input(msg)

def get_resource_names_ids(resources):
    """ parse a list of resource names/IDs separated by spaces or commas, return list of strings or None """
    if isinstance(resources, str) and resources:
        return resources.replace(",", " ").split()
    return None
def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
    """ transform input dictionary and filter its keys with allowed_keys and denied_keys sequences """
    int_allowed_keys = allowed_keys if allowed_keys else tuple()
    int_denied_keys = denied_keys if denied_keys else tuple()
    if int_allowed_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key in int_allowed_keys}
    if int_denied_keys:
        return {i_key: dict_data[i_key] for i_key in dict_data if i_key not in int_denied_keys}
    return dict_data
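# Illustrative usage (a sketch; the dictionaries below are hypothetical):
#   trim_dict({'a': 1, 'b': 2, 'c': 3}, allowed_keys=('a',))     -> {'a': 1}
#   trim_dict({'a': 1, 'b': 2, 'c': 3}, denied_keys=('a', 'b'))  -> {'c': 3}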

def executed_as_admin_user_in_ci():
    """ identify whether the script is executed by an admin user within the CI pipeline """
    return os.environ.get('GITLAB_USER_LOGIN') in ('246254', '252651', 'Jan.Krystof', 'moravcova', '469240', 'Josef.Nemec', '247801')
def executed_in_ci():
    """ detect CI environment """
    envvar_names = ('CI_JOB_NAME', 'CI_REPOSITORY_URL', 'GITLAB_USER_LOGIN')
    return {i_envvar_name in os.environ for i_envvar_name in envvar_names} == {True}

def get_ostack_project_names(project_name):
    """ get source and destination ostack project names """
    if '->' in project_name:
        return project_name.split('->', 1)
    return project_name, project_name
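# Illustrative usage (a sketch; project names are hypothetical):
#   get_ostack_project_names("project-a->project-b")  ->  ['project-a', 'project-b']
#   get_ostack_project_names("project-a")             ->  ('project-a', 'project-a')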
def get_destination_subnet(source_subnet):
    """ LUT for subnets """
    subnet_mapping = {
        # TODO: shared
        # group project internal network
        "group-project-network-subnet": "group-project-network-subnet"
    }
    if source_subnet in subnet_mapping:
        return subnet_mapping[source_subnet]
    return None
def get_destination_router(source_router):
    """ LUT for routers """
    router_mapping = {
        # TODO: shared
        # group project internal network
        "router": "group-project-router"
    }
    if source_router in router_mapping:
        return router_mapping[source_router]
    return None
def get_destination_flavor(source_flavor):
    """ LUT for flavors """
    flavor_mapping = {
        #'eph.16cores-60ram'                    # no need to handle, not used by any project in G1
        #'eph.8cores-30ram': 'c2.8core-30ram'   # no need to handle, not used by any project in G1
        #'eph.8cores-60ram': 'c3.8core-60ram'   # no need to handle, not used by any project in G1
        'hdn.cerit.large-35ssd-ephem': 'p3.4core-8ram',     # disk size differs, G2 has 80 instead of 35
        'hdn.cerit.large-ssd-ephem': 'p3.4core-8ram',       # ok
        'hdn.cerit.medium-35ssd-ephem': 'p3.2core-4ram',    # disk size differs, G2 has 80 instead of 35
        'hdn.cerit.xxxlarge-ssd-ephem': 'p3.8core-60ram',   # ok
        #'hdn.medium-ssd-ephem':                # no need to handle, not used by any project in G1
        'hpc.12core-64ram-ssd-ephem-500': 'c3.12core-64ram-ssd-ephem-500',  # not in G2 and is needed
        'hpc.16core-128ram': 'c3.16core-128ram',            # not in G2 and is needed
        'hpc.16core-256ram': 'c3.16core-256ram',            # not in G2 and is needed
        'hpc.16core-32ram': 'c2.16core-30ram',              # ok
        'hpc.16core-32ram-100disk': 'c3.16core-32ram-100disk',  # not in G2 and is needed
        'hpc.16core-64ram-ssd-ephem': 'hpc.16core-64ram-ssd',   # not in G2 and is needed
        'hpc.16core-64ram-ssd-ephem-500': 'p3.16core-60ram',    # ok
        'hpc.18core-48ram': '',                             # not in G2 and is needed
        'hpc.18core-64ram-dukan': 'c2.24core-60ram',        # no need to handle
        'hpc.24core-96ram-ssd-ephem': 'hpc.24core-96ram-ssd',   # no need to handle
        'hpc.30core-128ram-ssd-ephem-500': 'c3.30core-128ram-ssd-ephem-500',  # not in G2 and is needed
        'hpc.30core-256ram': 'c3.30core-256ram',            # not in G2 and is needed
        'hpc.30core-64ram': 'c3.32core-60ram',              # G2 has 2 more CPUs
        'hpc.4core-16ram-ssd-ephem': 'p3.4core-16ram',      # ok
        'hpc.4core-16ram-ssd-ephem-500': 'p3.4core-16ram',  # ok
        'hpc.4core-4ram': 'e1.medium',                      # no need to handle
        'hpc.8core-128ram': 'c3.8core-128ram',              # not in G2 and is needed
        'hpc.8core-16ram': 'c2.8core-16ram',                # ok
        'hpc.8core-16ram-ssd-ephem': 'p3.8core-16ram',      # no need to handle
        'hpc.8core-256ram': None,                           # no need to handle
        'hpc.8core-32ram-dukan': 'c2.8core-30ram',          # no need to handle
        'hpc.8core-32ram-ssd-ephem': 'p3.8core-30ram',      # ok
        'hpc.8core-32ram-ssd-rcx-ephem': 'p3.8core-30ram',  # ok
        'hpc.8core-64ram-ssd-ephem-500': 'p3.8core-60ram',  # ok
        'hpc.8core-8ram': 'e1.1xlarge',                     # G2 disk is 20 GB smaller
        'hpc.hdh-ephem': 'hpc.hdh',                         # not present and is needed
        'hpc.hdn.30core-128ram-ssd-ephem-500': 'c3.hdn.30core-128ram-ssd-ephem-500',  # not needed
        'hpc.hdn.4core-16ram-ssd-ephem-500': 'p3.4core-16ram',  # not needed
        #'hpc.ics-gladosag-full': 'c3.ics-gladosag-full',   # not needed
        'hpc.large': 'g2.3xlarge',                          # ok
        'hpc.medium': 'c2.8core-30ram',                     # ok
        'hpc.small': 'c2.4core-16ram',                      # ok
        'hpc.xlarge': None,                                 # not in G2
        'hpc.xlarge-memory': 'c3.xlarge-memory',            # not in G2
        'standard.16core-32ram': 'g2.2xlarge',              # ok
        'standard.20core-128ram': 'e1.20core-128ram',       # not needed
        'standard.20core-256ram': 'e1.20core-256ram',       # not in G2
        'standard.2core-16ram': 'c3.2core-16ram',           # ok
        'standard.large': 'e1.large',                       # ok, alternatively c3.4core-8ram
        'standard.medium': 'e1.medium',                     # 2 more CPUs
        'standard.memory': 'c3.2core-30ram',                # alternatively c2.2core-30ram
        'standard.one-to-many': 'c3.24core-60ram',          # G2 has 4 more CPUs
        'standard.small': 'e1.small',                       # 2x more RAM and CPUs in G2
        'standard.tiny': 'e1.tiny',                         # 2x more RAM and CPUs in G2
        'standard.xlarge': 'e1.2xlarge',                    # 4 more CPUs in G2
        'standard.xlarge-cpu': 'e1.2xlarge',                # ok
        'standard.xxlarge': 'c2.8core-30ram',               # ok
        'standard.xxxlarge': 'c3.8core-60ram'               # ok
    }
    assert source_flavor in flavor_mapping, "Source flavor can be mapped to a destination flavor"
    assert flavor_mapping[source_flavor], "Source flavor maps to a valid destination flavor"
    return flavor_mapping[source_flavor]
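# Illustrative usage (a sketch):
#   get_destination_flavor('hpc.small')   ->  'c2.4core-16ram'
#   get_destination_flavor('hpc.xlarge')  ->  raises AssertionError (maps to None)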
def normalize_table_data_field(data_field):
    """ normalize single data field (single data insert) """
    int_dict = {}
    i_name_key = '@name'
    for i_data_field_item in data_field:
        i_value_key = [i_k for i_k in i_data_field_item.keys() if i_k != i_name_key][0]
        int_dict[i_data_field_item[i_name_key]] = i_data_field_item[i_value_key]
    return int_dict
def normalize_table_data(data):
    """ normalize whole table data """
    int_list = []
    for i_data_field in data:
        int_list.append(normalize_table_data_field(i_data_field['field']))
    return int_list
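# Illustrative usage (a sketch; the mysqldump XML row below is hypothetical):
#   normalize_table_data([{'field': [{'@name': 'name',    '#text': 'my-keypair'},
#                                    {'@name': 'user_id', '#text': 'abc123'}]}])
#   -> [{'name': 'my-keypair', 'user_id': 'abc123'}]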

def get_dst_resource_name(args, name=""):
    """ translate original name to destination one """
    return f"{args.destination_entity_name_prefix}{name}"
def get_dst_resource_desc(args, desc="", fields=None):
    """ translate original description to destination one and fill in optional fields """
    if '{}' in args.destination_entity_description_suffix and fields:
        return f"{desc}{args.destination_entity_description_suffix.format(fields)}"
    return f"{desc}{args.destination_entity_description_suffix}"
def get_openrc(file_handle):
    """ parse and return OpenRC file """
    openrc_vars = {}
    for line in file_handle:
        match = re.match(r'^export (\w+)=(.+)$', line.strip())
        if match:
            openrc_vars[match.group(1)] = match.group(2).strip('"')
    return openrc_vars
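# Illustrative usage (a sketch; the file path is hypothetical):
#   with open("project-openrc.sh", encoding="utf-8") as i_file:
#       openrc_vars = get_openrc(i_file)   # {'OS_AUTH_URL': '...', 'OS_USERNAME': '...', ...}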
def get_ostack_connection(openrc_vars):
    """ create an OpenStack SDK connection out of parsed OpenRC variables """
    auth_args = {
        'auth_url': openrc_vars.get('OS_AUTH_URL'),
        'username': openrc_vars.get('OS_USERNAME'),
        'password': openrc_vars.get('OS_PASSWORD'),
        'project_name': openrc_vars.get('OS_PROJECT_NAME'),
        'project_domain_name': openrc_vars.get('OS_PROJECT_DOMAIN_NAME'),
        'user_domain_name': openrc_vars.get('OS_USER_DOMAIN_NAME'),
        'project_domain_id': openrc_vars.get('OS_PROJECT_DOMAIN_ID'),
        'user_domain_id': openrc_vars.get('OS_USER_DOMAIN_ID'),
    }
    connection_args = {
        'compute_api_version': openrc_vars.get('OS_COMPUTE_API_VERSION'),
        'identity_api_version': openrc_vars.get('OS_IDENTITY_API_VERSION'),
        'volume_api_version': openrc_vars.get('OS_VOLUME_API_VERSION')
    }
    auth = v3.Password(**auth_args)
    ostack_sess = session.Session(auth=auth)
    ostack_conn = openstack.connection.Connection(session=ostack_sess, **connection_args)
    return ostack_conn
def get_ostack_project(ostack_connection, project_name):
    """ find and return ostack project by name, otherwise None """
    project = None
    for i_project in ostack_connection.list_projects():
        if i_project.name == project_name:
            project = i_project
    return project
def get_ostack_project_type(ostack_connection, project):
    """ detect project type, return 'personal' / 'group' """
    if project.name in [i_user.name for i_user in ostack_connection.list_users()]:
        return "personal"
    return "group"
def get_ostack_project_security_groups(ostack_connection, project=None):
    security_groups = []
    if project:
        for i_security_group in ostack_connection.network.security_groups():
            if i_security_group.tenant_id == project.id:
                security_groups.append(i_security_group)
        return security_groups
    return tuple(ostack_connection.network.security_groups())
def get_ostack_project_keypairs(ostack_connection, project=None):
    return ostack_connection.list_keypairs()
def get_ostack_project_keypairs2(ostack_connection, project=None):
    return list(ostack_connection.compute.keypairs())
def get_ostack_project_servers(ostack_connection, project=None):
    return tuple(ostack_connection.compute.servers())
def get_ostack_project_volumes(ostack_connection, project=None):
    return ostack_connection.block_store.volumes()
def get_ostack_project_flavors(ostack_connection, project=None):
    return tuple(ostack_connection.compute.flavors())
def get_resource_details(resources):
    """ inspect resources """
    for i_resource in resources:
        print(i_resource)
        pprint.pprint(i_resource)
def remote_cmd_exec(hostname, username, key_filename, command):
    """ execute a remote command, return stdout, stderr and exit-code or Exception """
    # Create SSH client
    ssh_client = paramiko.SSHClient()
    # Automatically add untrusted hosts
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ecode = None
    try:
        # Connect to the remote host
        pkey = paramiko.RSAKey.from_private_key_file(key_filename)
        ssh_client.connect(hostname, username=username, pkey=pkey, look_for_keys=False)
        # Execute the command, read the output and close
        stdin, stdout, stderr = ssh_client.exec_command(command)
        output = stdout.read().decode().strip()
        error = stderr.read().decode().strip()
        ecode = stdout.channel.recv_exit_status()
        ssh_client.close()
        return output, error, ecode
    except Exception as e:
        print("Error:", e)
        return None, None, e
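# Illustrative usage (a sketch; host, user and key path are hypothetical):
#   stdout, stderr, ecode = remote_cmd_exec("migrator.example.org", "migrator",
#                                           "/path/to/id_rsa", "hostname")
#   assert ecode == 0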
def assert_entity_ownership(entities, project):
    """ assert that all entities belong to the given project """
    for i_entity in entities:
        assert i_entity.project_id == project.id, f"Entity belongs to expected project (id: {project.id})"
def get_source_keypairs(args):
    """ fetch and normalize source cloud keypairs from the mysqldump XML file on the ceph migrator host """
    reply_stdout, reply_stderr, reply_ecode = remote_cmd_exec(args.ceph_migrator_host,
                                                              args.ceph_migrator_user,
                                                              args.ceph_migrator_sshkeyfile.name,
                                                              f"cat {args.source_keypair_xml_dump_file}")
    assert reply_ecode == 0, "Keypairs received"
    table_dictdata = xmltodict.parse(reply_stdout)
    table_data_dictdata = table_dictdata['mysqldump']['database']['table_data']['row']
    return normalize_table_data(table_data_dictdata)
def get_source_keypair(keypairs, keypair_name, user_id):
    """ select the keypair matching both keypair name and user id, otherwise None """
    keypairs_selected = [i_keypair for i_keypair in keypairs if i_keypair.get("name", "") == keypair_name and i_keypair.get("user_id", "") == user_id]
    if keypairs_selected:
        return keypairs_selected[0]
    return None
def create_keypair(args, ostack_connection, keypair):
    """ create openstack keypair object """
    return ostack_connection.compute.create_keypair(name=get_dst_resource_name(args, keypair['name']),
                                                    public_key=keypair['public_key'], type=keypair['type'])

def create_security_groups(args, src_ostack_conn, dst_ostack_conn, src_security_group, dst_project, recursion_stack=None):
    """ create openstack security group[s] """
    int_recursion_stack = {} if recursion_stack is None else recursion_stack
    int_sg = dst_ostack_conn.network.create_security_group(name=get_dst_resource_name(args, src_security_group.name),
                                                            description=get_dst_resource_desc(args,
                                                                                              src_security_group.description,
                                                                                              src_security_group.id),
                                                            project_id=dst_project.id)
    int_recursion_stack[src_security_group.id] = int_sg.id
    for i_rule in src_security_group.security_group_rules:
        # browse security group rules
        i_mod_rule = trim_dict(i_rule, denied_keys=['id', 'project_id', 'tenant_id', 'revision_number', 'updated_at', 'created_at', 'tags', 'standard_attr_id', 'normalized_cidr'])
        i_mod_rule['security_group_id'] = int_sg.id
        i_mod_rule['project_id'] = dst_project.id
        i_mod_rule = {i_k: i_mod_rule[i_k] for i_k in i_mod_rule if i_mod_rule[i_k] is not None}
        if i_mod_rule.get('remote_group_id') is not None:
            if i_mod_rule['remote_group_id'] in int_recursion_stack:
                # keep reference to itself or known (already created) SGs
                i_mod_rule['remote_group_id'] = int_recursion_stack[i_mod_rule['remote_group_id']]
            # get linked source SG
            elif _src_sg := src_ostack_conn.network.find_security_group(i_mod_rule['remote_group_id']):
                if _dst_sg := dst_ostack_conn.network.find_security_group(get_dst_resource_name(args, _src_sg.name),
                                                                          project_id=dst_project.id):
                    i_mod_rule['remote_group_id'] = _dst_sg.id
                else:
                    int_linked_sg = create_security_groups(args, src_ostack_conn, dst_ostack_conn,
                                                           _src_sg, dst_project,
                                                           copy.deepcopy(int_recursion_stack))
                    i_mod_rule['remote_group_id'] = int_linked_sg.id
        try:
            dst_ostack_conn.network.create_security_group_rule(**i_mod_rule)
        except openstack.exceptions.ConflictException:
            # rule already exists in the destination security group
            pass
    return int_sg

def duplicate_ostack_project_security_groups(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project):
    """ duplicate all project's openstack security group[s] """
    src_project_security_groups = tuple(src_ostack_conn.network.security_groups(project_id=src_project.id))
    for i_src_security_group in src_project_security_groups:
        j_dst_security_group_found = False
        for j_dst_security_group in tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id)):
            if get_dst_resource_name(args, i_src_security_group.name) == j_dst_security_group.name and \
               i_src_security_group.id in j_dst_security_group.description:
                j_dst_security_group_found = True
        if not j_dst_security_group_found:
            create_security_groups(args, src_ostack_conn, dst_ostack_conn, i_src_security_group, dst_project)
    return src_project_security_groups, tuple(dst_ostack_conn.network.security_groups(project_id=dst_project.id))
def log_or_assert(args, msg, condition, trace_details=None):
    """ log, assert, dump state """
    if not condition:
        with open(args.exception_trace_file, "w") as file:
            file.write(f"{msg}\n{pprint.pformat(trace_details)}\n\n{locals()}\n")
    assert condition, msg
    args.logger.info(msg)
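# Illustrative usage (a sketch; args is the script's argparse namespace carrying
# logger and exception_trace_file, names below are hypothetical):
#   log_or_assert(args, "Destination project detected", dst_project, trace_details=dst_project)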
def wait_for_ostack_server_status(ostack_connection, server_name_or_id, server_status, timeout=600):
    """ poll server status until it reaches server_status or timeout [s] expires, return last seen status """
    int_start_timestamp = time.time()
    int_server = ostack_connection.compute.find_server(server_name_or_id)
    int_server_status = None
    while True:
        if time.time() > (int_start_timestamp + timeout):
            break
        int_server_status = ostack_connection.compute.find_server(int_server.id).status
        if int_server_status == server_status:
            break
    return int_server_status

def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=300):
""" """
int_start_timestamp = time.time()
int_volume = ostack_connection.block_storage.find_volume(volume_name_or_id)
int_volume_status = None
while True:
if time.time() > (int_start_timestamp + timeout):
break
int_volume_status = ostack_connection.block_storage.find_volume(int_volume.id).status
if int_volume_status == volume_status:
break
return int_volume_status
def server_detect_floating_address(server):
    """ return True if server has attached floating IP address otherwise False """
    for _, i_ip_details in server.addresses.items():
        for i_ip_detail in i_ip_details:
            if str(i_ip_detail.get('version')) == '4' and i_ip_detail.get('OS-EXT-IPS:type') == 'floating':
                return True
    return False
def get_server_floating_ip_port(ostack_connection, server):
    """ return server's port where a floating IP should be attached (port with a private fixed IP), otherwise None """
    for i_port in ostack_connection.network.ports(device_id=server.id):
        for i_port_ip in i_port.fixed_ips:
            for i_ip_prefix in ('192.', '10.', '172.'):
                if str(i_port_ip.get('ip_address')).startswith(i_ip_prefix):
                    return i_port
    return None
def get_server_block_device_mapping(args, server_volume_attachment, server_volume, server_root_device_name):
    """ return server block device mapping item """
    return {'source': {'block_storage_type': 'openstack-volume-ceph-rbd-image',
                       'volume_attachment_id': server_volume_attachment.id,
                       'volume_id': server_volume.id,
                       'ceph_pool_name': args.source_ceph_cinder_pool_name,
                       'ceph_rbd_image_name': server_volume.id},
            'destination': {'volume_size': server_volume.size,
                            'volume_name': get_dst_resource_name(args, server_volume.name),
                            'volume_description': server_volume.description,
                            'volume_id': None,
                            'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                            'device_name': os.path.basename(server_volume_attachment.device),
                            'volume_bootable': server_root_device_name == server_volume_attachment.device}}

def describe_server_network_connection(args, dst_ostack_conn, netaddr_dict):
    """ create ostack server to network connection via network id or fixed-ip
        results in a single dictionary fed to conn.compute.create_server(... networks=[<>, ...])
    """
    # netaddr_dict{ 'dst-network': Network,
    #               'src-network-addresses': {'network-name': <source-network-name>,
    #                                         'addresses': [ ... ]} }
    fixed_port = None
    dst_network = netaddr_dict['dst-network']
    source_server_fixed_addresses = [i_addr['addr'] for i_addr in netaddr_dict['src-network-addresses']['addresses'] if i_addr.get('OS-EXT-IPS:type') == 'fixed']
    if len(source_server_fixed_addresses) == 1 and len(dst_network.subnet_ids) == 1:
        try:
            port_desc = "A part of workload migration: created to get same server fixed-ips"
            fixed_port = dst_ostack_conn.network.create_port(name=get_dst_resource_name(args),
                                                             description=port_desc,
                                                             network_id=dst_network.id,
                                                             fixed_ips=[{"ip_address": source_server_fixed_addresses[0],
                                                                         "subnet_id": dst_network.subnet_ids[0]}])
        except Exception:
            # port creation failed, fall back to plain network attachment below
            pass
    if fixed_port:
        return {'port': fixed_port.id}
    return {'uuid': dst_network.id}