Commit 2cf79f97 authored by František Řezníček
feat: quality improvements, announcing clib for ceph related actions, reduce dups

parent b4040c59
Pipeline #415126 passed

clib.py (new file)
""" OpenStack migrator - ceph library """
import json
import os.path
from lib import remote_cmd_exec, log_or_assert
def get_ceph_client_name(args, ceph_src_pool_name, ceph_dst_pool_name=None):
""" identify which ceph user to use for planned ceph operation """
int_pool_name = ceph_dst_pool_name if ceph_dst_pool_name else ceph_src_pool_name
return "client.cinder" if int_pool_name in (args.source_ceph_cinder_pool_name, args.source_ceph_ephemeral_pool_name,) else "client.migrator"
def ceph_rbd_images_list(args, pool_name):
""" list ceph RBD images in pool named pool_name """
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-images-list.sh')
stdout, _, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"{script_path} {pool_name}")
assert stdout, f"RBD pool ({pool_name}) images received successfully (non-empty RBD list)"
assert ecode == 0, f"RBD pool ({pool_name}) images received successfully (ecode)"
return stdout.splitlines()
def ceph_rbd_image_info(args, pool_name, rbd_image_name):
""" get ceph RBD image information """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-info.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
return json.loads(stdout), stderr, ecode
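# Usage sketch (assumption): the remote script is expected to emit JSON along
# the lines of `rbd info --format json`; project-migrator.py only relies on the
# 'size' key (see the F.24 hunk below).
#
#   image_info, _, ecode = ceph_rbd_image_info(args, pool_name, image_name)
#   if ecode == 0 and 'size' in image_info:
#       pass  # image_info['size'] holds the image size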
def ceph_rbd_image_exists(args, pool_name, rbd_image_name):
""" detect whether RBD image {pool_name}/{rbd_image_name} exists """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-exists.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
return stdout.splitlines(), stderr, ecode
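# Usage sketch: existence is signalled by the script's exit code, which is how
# migrate_rbd_image() below consumes this helper -- ecode == 0 means the image
# exists and stdout lists the matching image name(s).
#
#   images, _, ecode = ceph_rbd_image_exists(args, pool_name, image_name)
#   if ecode == 0:
#       pass  # images[0] is the existing RBD image name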
def ceph_rbd_image_delete(args, pool_name, rbd_image_name):
""" delete RBD image {pool_name}/{rbd_image_name} """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-delete.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
return stdout.splitlines(), stderr, ecode
def ceph_rbd_image_flatten(args, pool_name, rbd_image_name):
""" flatten RBD image {pool_name}/{rbd_image_name} """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-flatten.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name}")
return stdout.splitlines(), stderr, ecode
def ceph_rbd_image_clone(args, src_pool_name, src_rbd_image_name, src_rbd_image_snapshot_name,
dst_pool_name, dst_rbd_image_name):
""" clone RBD image {src_pool_name}/{src_rbd_image_name}@{src_rbd_image_snapshot_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-clone.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {src_pool_name} {src_rbd_image_name} {src_rbd_image_snapshot_name} {dst_pool_name} {dst_rbd_image_name}")
return stdout.splitlines(), stderr, ecode
def ceph_rbd_image_copy(args, src_pool_name, src_rbd_image_name, dst_pool_name, dst_rbd_image_name):
""" copy RBD image {src_pool_name}/{src_rbd_image_name} -> {dst_pool_name}/{dst_rbd_image_name}"""
ceph_client_name = get_ceph_client_name(args, src_pool_name, dst_pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-copy.sh')
cmd = f"CEPH_USER={ceph_client_name} {script_path} {src_pool_name} {src_rbd_image_name} {dst_pool_name} {dst_rbd_image_name}"
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
cmd)
return stdout.splitlines(), stderr, ecode
def ceph_rbd_image_snapshot_exists(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
""" detect whether RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} exists """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-snapshot-exists.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
return stdout.splitlines(), stderr, ecode
def ceph_rbd_image_snapshot_create(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
""" create RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-snapshot-create.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
return stdout.splitlines(), stderr, ecode
def ceph_rbd_image_snapshot_delete(args, pool_name, rbd_image_name, rbd_image_snapshot_name):
""" delete RBD image snapshot {pool_name}/{rbd_image_name}@{rbd_image_snapshot_name} """
ceph_client_name = get_ceph_client_name(args, pool_name)
script_path = os.path.join(args.ceph_migrator_host_base_dir, 'ceph-rbd-image-snapshot-delete.sh')
stdout, stderr, ecode = remote_cmd_exec(args.ceph_migrator_host,
args.ceph_migrator_user,
args.ceph_migrator_sshkeyfile.name,
f"CEPH_USER={ceph_client_name} {script_path} {pool_name} {rbd_image_name} {rbd_image_snapshot_name}")
return stdout.splitlines(), stderr, ecode
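# Sketch of the snapshot life cycle driven by migrate_rbd_image() below
# (mirrors steps G.3-G.5 and G.15-G.17): assert the snapshot name does not
# collide, create the snapshot, and delete it again once the image is copied.
#
#   _, _, ecode = ceph_rbd_image_snapshot_exists(args, pool, image, snap)
#   assert ecode != 0  # no collision expected before creation
#   _, _, ecode = ceph_rbd_image_snapshot_create(args, pool, image, snap)
#   assert ecode == 0
#   ...                # clone, flatten and copy the image
#   _, _, ecode = ceph_rbd_image_snapshot_delete(args, pool, image, snap)
#   assert ecode == 0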
def migrate_rbd_image(args, server_block_device_mapping):
""" migrate G1 ceph RBD image to G2 ceph """
## G1: detect existing G1 RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms 0069e95e-e805-44ff-bab5-872424312ff6
source_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
server_block_device_mapping['source']['ceph_rbd_image_name'])
log_or_assert(args, "G.1 Source OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, "G.1 Source OpenStack VM RBD image exists - single image returned",
source_server_rbd_images and len(source_server_rbd_images) == 1, locals())
source_server_rbd_image = source_server_rbd_images[0]
## G2: find volume
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
destination_server_rbd_images, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
server_block_device_mapping['destination']['volume_id'])
log_or_assert(args, "G.2 Destination OpenStack VM RBD image exists - query succeeded", ecode == 0, locals())
log_or_assert(args, "G.2 Destination OpenStack VM RBD image exists - single image returned",
destination_server_rbd_images and len(destination_server_rbd_images) == 1, locals())
destination_server_rbd_image = destination_server_rbd_images[0]
## G1: create RBD image protected snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-create.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
source_rbd_image_snapshot_name = f"g1-g2-migration-{source_server_rbd_image}"
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.3 Source OpenStack VM RBD image has non-colliding snapshot " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode != 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_create(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.4 Source OpenStack VM RBD image snapshot created " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.5 Source OpenStack VM RBD image snapshot exists " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name})",
ecode == 0, locals())
## G2: delete RBD image
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-delete.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
## G2: confirm volume is deleted
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 1
stdout, stderr, ecode = ceph_rbd_image_delete(args,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
f"G.6 Destination OpenStack VM RBD image deletion succeeded ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
f"G.7 Destination OpenStack VM RBD image does not exist ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode != 0, locals())
## G1: clone from snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-clone.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-exists.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
source_rbd_cloned_image_name = f"g1-g2-migration-{source_server_rbd_image}"
stdout, stderr, ecode = ceph_rbd_image_clone(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
"G.8 Source OpenStack VM RBD image cloned succesfully " \
f"({server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name} -> {server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.9 Source OpenStack VM cloned RBD image exists ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
## G1: flatten cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-flatten.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
stdout, stderr, ecode = ceph_rbd_image_flatten(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.10 Source OpenStack VM cloned RBD image flatten successfully ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
## G1->G2: copy RBD image to target pool
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-copy.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk cloud-cinder-volumes-prod-brno <g2-rbd-image-name>
#CEPH_USER=client.migrator ~/migrator/ceph-rbd-image-exists.sh cloud-cinder-volumes-prod-brno <g2-rbd-image-name> # 0
stdout, stderr, ecode = ceph_rbd_image_copy(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
"G.11 Source OpenStack VM RBD image copied G1 -> G2 succesfully" \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name} -> {server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image}",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['destination']['ceph_pool_name'],
destination_server_rbd_image)
log_or_assert(args,
f"G.12 Destination OpenStack VM RBD image exists ({server_block_device_mapping['destination']['ceph_pool_name']}/{destination_server_rbd_image})",
ecode == 0, locals())
## G1: delete cloned RBD image
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-delete.sh prod-ephemeral-vms migrated-006e230e-df45-4f33-879b-19eada244489_disk
stdout, stderr, ecode = ceph_rbd_image_delete(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.13 Source OpenStack VM RBD cloned image deletion succeeded ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_rbd_cloned_image_name)
log_or_assert(args,
f"G.14 Source OpenStack VM cloned RBD image does not exist anymore ({server_block_device_mapping['source']['ceph_pool_name']}/{source_rbd_cloned_image_name})",
ecode != 0, locals())
## G1: remove created snapshot
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 0
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-delete.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2
#CEPH_USER=client.cinder ~/migrator/ceph-rbd-image-snapshot-exists.sh prod-ephemeral-vms 006e230e-df45-4f33-879b-19eada244489_disk migration-snap2 # 1
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.15 Source OpenStack VM RBD image snapshot still exists " \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_delete(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.16 Source OpenStack VM RBD image snapshot deletion succeeeded " \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode == 0, locals())
stdout, stderr, ecode = ceph_rbd_image_snapshot_exists(args,
server_block_device_mapping['source']['ceph_pool_name'],
source_server_rbd_image,
source_rbd_image_snapshot_name)
log_or_assert(args,
"G.17 Source OpenStack VM RBD image snapshot does not exist anymore " \
f"{server_block_device_mapping['source']['ceph_pool_name']}/{source_server_rbd_image}@{source_rbd_image_snapshot_name}",
ecode != 0, locals())
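# Caller-side sketch (assumption, modelled on the H.5 volume-migration hunk in
# project-migrator.py below): migrate_rbd_image() expects a mapping with
# 'source' and 'destination' sub-dicts; the volume ID variables here are
# hypothetical placeholders.
#
#   server_block_device_mapping = {
#       'source': {'ceph_pool_name': args.source_ceph_cinder_pool_name,
#                  'ceph_rbd_image_name': source_volume_id},
#       'destination': {'ceph_pool_name': args.destination_ceph_cinder_pool_name,
#                       'volume_id': destination_volume_id}}
#   migrate_rbd_image(args, server_block_device_mapping)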
lib.py
@@ -18,10 +18,10 @@ def wait_for_keypress(msg="Press Enter to continue..."):
     """ """
     return input("Press Enter to continue...")
 
-def normalize_servers(servers):
-    """ list of server names/IDs separated by space of comma returned as list of strings or None """
-    if isinstance(servers, str) and servers:
-        return servers.replace(","," ").split()
+def get_resource_names_ids(resources):
+    """ parses list of resource names/IDs separated by space or comma returned as list of strings or None """
+    if isinstance(resources, str) and resources:
+        return resources.replace(","," ").split()
     return None
 
 def trim_dict(dict_data, allowed_keys=None, denied_keys=None):
@@ -288,107 +288,6 @@ def remote_cmd_exec(hostname, username, key_filename, command):
         print("Error:", e)
         return None, None, e
 
 def assert_entity_ownership(entities, project):
     """ """
@@ -493,7 +392,7 @@ def wait_for_ostack_server_status(ostack_connection, server_name_or_id, server_s
     return int_server_status
 
-def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=120):
+def wait_for_ostack_volume_status(ostack_connection, volume_name_or_id, volume_status, timeout=300):
     """ """
     int_start_timestamp = time.time()
     int_volume = ostack_connection.block_storage.find_volume(volume_name_or_id)
@@ -543,174 +442,6 @@ def get_server_block_device_mapping(args, server_volume_attachment, server_volum
 
 def create_destination_networking(args, src_ostack_conn, dst_ostack_conn, src_project, dst_project, src_network_name):
project-migrator.py
 #!/usr/bin/env python3
 """
-OpenStack project multicloud migrator
+OpenStack project multi-cloud migrator
 
 Tool performs OpenStack workflow migration from single OpenStack cloud to another one.
-Block storage is transferred on external node using ceph low-level commands.
-Tool expects same block storage connected to both clouds to be able to perform storage transfer quickly.
+Block storage is transferred using external ceph migrator server node using ceph low-level commands.
+Ceph migrator server node is allowed to perform ceph operations (ceph storage access is blocked outside OpenStack servers) and also provides enough disk space for object storage migration.
+TODO: Object storage migration
 
 Tool relies on main libraries:
  * openstacksdk for OpenStack management
 * paramiko for low-level ceph storage migration (--ceph-migrator-host)
 
 Usage example:
- * ./project-migrator.py --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc
-   --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc
-   --project-name meta-cloud-new-openstack
-   --validation-a-source-server-id <>
-   --ceph-migrator-sshkeyfile ~/.ssh/id_rsa.g1-g2-ostack-cloud-migration
+ * Migrate all running virtual servers from source OpenStack ~/c/prod-einfra_cz_migrator.sh.inc
+   project meta-cloud-new-openstack into destination one defined by
+   OpenRC ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc, validate user's request by
+   validating server existence with ID server-id-xyz in source project
+   $ ./project-migrator.py
+       --source-openrc ~/c/prod-einfra_cz_migrator.sh.inc
+       --destination-openrc ~/c/g2-prod-brno-einfra_cz_migrator.sh.inc
+       --project-name meta-cloud-new-openstack
+       --validation-a-source-server-id server-id-xyz
+       --ceph-migrator-sshkeyfile ~/.ssh/id_rsa.g1-g2-ostack-cloud-migration
 """
import argparse
@@ -22,6 +35,7 @@ import pprint
 import sys
 
 import lib
+import clib
 
 def main(args):
     """ main project migration loop """
@@ -81,7 +95,7 @@ def main(args):
     source_rbd_images = {args.source_ceph_ephemeral_pool_name: None,
                          args.source_ceph_cinder_pool_name: None}
     for i_pool_name in source_rbd_images.keys():
-        source_rbd_images[i_pool_name] = lib.ceph_rbd_images_list(args, i_pool_name)
+        source_rbd_images[i_pool_name] = clib.ceph_rbd_images_list(args, i_pool_name)
         lib.log_or_assert(args, f"D.3 Source cloud RBD images are received ({i_pool_name}).", source_rbd_images[i_pool_name])
 
     source_keypairs = lib.get_source_keypairs(args)
@@ -242,7 +256,7 @@ def main(args):
         args.logger.info(f"F.23 Source OpenStack server - Root partition found as RBD image {args.source_ceph_ephemeral_pool_name}/{i_source_ceph_ephemeral_rbd_image}")
 
         # get rbd image info / size
-        i_source_ceph_ephemeral_rbd_image_data, _, _ = lib.ceph_rbd_image_info(args, args.source_ceph_ephemeral_pool_name,
+        i_source_ceph_ephemeral_rbd_image_data, _, _ = clib.ceph_rbd_image_info(args, args.source_ceph_ephemeral_pool_name,
                                                                                 i_source_ceph_ephemeral_rbd_image)
         lib.log_or_assert(args, f"F.24 Source OpenStack ceph RBD image proper information received {i_source_ceph_ephemeral_rbd_image_data}",
                           i_source_ceph_ephemeral_rbd_image_data and 'size' in i_source_ceph_ephemeral_rbd_image_data)
@@ -308,7 +322,7 @@ def main(args):
         # volume migration (browse i_server_block_device_mappings)
         for i_server_block_device_mapping in i_server_block_device_mappings:
-            lib.migrate_rbd_image(args, i_server_block_device_mapping)
+            clib.migrate_rbd_image(args, i_server_block_device_mapping)
 
         # start server in source cloud, wait for back being 'ACTIVE'
         if i_source_server_detail.status != source_project_conn.compute.find_server(i_source_server.id).status and \
@@ -391,7 +405,7 @@ def main(args):
                                        'ceph_rbd_image_name': i_source_volume.id},
                             'destination': {'ceph_pool_name': args.destination_ceph_cinder_pool_name,
                                             'volume_id': i_dst_volume.id}}
-        lib.migrate_rbd_image(args, i_volume_mapping)
+        clib.migrate_rbd_image(args, i_volume_mapping)
         i_dst_volume_detail = destination_project_conn.block_storage.find_volume(i_dst_volume.id)
         lib.log_or_assert(args,
                           f"H.5 Destination OpenStack volume available (name:{i_dst_volume_detail.name}, id:{i_dst_volume_detail.id})",
@@ -413,6 +427,8 @@ if __name__ == "__main__":
                     help='OpenStack migrator ceph node username')
     AP.add_argument('--ceph-migrator-sshkeyfile', default=None, type=argparse.FileType('r'),
                     help='OpenStack migrator SSH keyfile')
+    AP.add_argument('--ceph-migrator-host-base-dir', default='/root/migrator',
+                    help='OpenStack ceph migrator base directory for scripts and operations on ceph migrator host')
     AP.add_argument('--source-ceph-cinder-pool-name', default='prod-cinder-volumes',
                     help='Source OpenStack/ceph cloud Cinder pool name')
     AP.add_argument('--source-ceph-ephemeral-pool-name', default='prod-ephemeral-vms',
@@ -422,7 +438,7 @@ if __name__ == "__main__":
     AP.add_argument('--destination-ceph-ephemeral-pool-name', default='cloud-ephemeral-volumes-prod-brno',
                     help='Destination OpenStack/ceph cloud "ephemeral on ceph" or "libvirt ephemeral" pool name')
     AP.add_argument('--source-keypair-xml-dump-file', default='/root/migrator/prod-nova_api_key_pairs.dump.xml',
-                    help='Source OpenStack cloud keypair SQL/XML dump file name')
+                    help='Source OpenStack cloud keypair SQL/XML dump file name (on ceph-migrator-host)')
     AP.add_argument('--source-servers-left-shutoff', default=False, required=False, action='store_true',
                     help='Migrated source servers are left SHUTOFF (i.e. not started automatically).')
     AP.add_argument('--destination-bootable-volume-image-name', default='cirros-0-x86_64',
@@ -454,8 +470,8 @@ if __name__ == "__main__":
                         format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     ARGS = AP.parse_args()
     ARGS.logger = logging.getLogger("project-migrator")
-    ARGS.explicit_server_names = lib.normalize_servers(ARGS.explicit_server_names)
-    ARGS.explicit_volume_names = lib.normalize_servers(ARGS.explicit_volume_names)
+    ARGS.explicit_server_names = lib.get_resource_names_ids(ARGS.explicit_server_names)
+    ARGS.explicit_volume_names = lib.get_resource_names_ids(ARGS.explicit_volume_names)
     if ARGS.debugging:
         import IPython
         #IPython.embed()