Skip to content
Snippets Groups Projects
Commit aa2c87f3 authored by František Řezníček's avatar František Řezníček
Browse files

feat: prod-brno general demos

parent f8db614e
No related branches found
No related tags found
No related merge requests found
Showing
with 371 additions and 0 deletions
# general IaaS infrastructure demo
Assuming you have been added to a group project and you can log in via the [e-INFRA CZ OpenStack cloud dashboard](https://horizon.brno.openstack.cloud.e-infra.cz/) using one of the supported federations (e-INFRA CZ, ...).
We recommend building a custom cloud infrastructure with Terraform or the openstack client rather than using the [e-INFRA CZ OpenStack cloud dashboard](https://horizon.brno.openstack.cloud.e-infra.cz/).
Below demos show in detail how to do so.
## [Terraform `general` demo](./terraform)
Terraform demo shows how to automate building highly scalable IaaS infrastructure.
## [OpenStack client `general` demo](./commandline)
OpenStack shell script demo shows how to automate small IaaS infrastructure which does not need additional scalability.
# Build OpenStack infrastructure from command-line using openstack client
## Pre-requisites
* Linux/Mac/WSL2 terminal
* BASH shell
* installed openstack client ([how?](https://docs.fuga.cloud/how-to-use-the-openstack-cli-tools-on-linux))
* e-INFRA OpenStack cloud personal/group project granted.
* downloaded application credentials from the OpenStack Horizon dashboard ([how?](https://docs.e-infra.cz/compute/openstack/how-to-guides/obtaining-api-key/)), stored as the text file `project_openrc.sh.inc`.
## How to use the script
```sh
# in bash shell
source project_openrc.sh.inc
./cmdline-demo.sh basic-infrastructure-1
```
See linked reference executions for [personal](./cmdline-demo.sh.personal.log) and [group project](./cmdline-demo.sh.group.log).
## Infrastructure schema
What does the basic infrastructure look like?
* single VM (ubuntu-jammy)
* VM firewall opening port 22
* VM SSH keypair generated locally and pubkey uploaded to cloud
* private subnet and network (skipped for personal projects where shared entities are used)
* router to external internet (skipped for personal projects where shared entities are used)
* public floating ip address
![basic-infrastructure.png](/clouds/common/pictures/basic-infrastructure.png)
#!/usr/bin/env bash
# e-INFRA CZ G2 openstack command-line demo - personal/group project
# Usage: cmdline-demo.sh [ostack-entities-prefix/profile-name]
#
# Quote $0 and the command substitution so the script also works when
# launched from a directory whose path contains spaces.
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
#############################################################################
# variables
#############################################################################
CLOUD_ENV_NAME="g2-prod-brno"                       # namespaces local SSH keys under ~/.ssh/
OPENSTACK_BIN="${OPENSTACK_BIN:-openstack}"         # override to use a different client binary
# Prefix for all cloud entities created by this demo; defaults to user@host.
ENTITIES_PREFIX="${1:-"${USER}_$(hostname)"}"
EXTERNAL_NETWORK_NAME="external-ipv4-general-public"
KEYPAIR_NAME="${ENTITIES_PREFIX}-demo-keypair"
SUBNET_CIDR="${SUBNET_CIDR:-"192.168.222.0/24"}"
SERVER_NAME="${ENTITIES_PREFIX}-demo-server"
FLAVOR_NAME="${FLAVOR_NAME:-"e1.medium"}"
IMAGE_NAME="${IMAGE_NAME:-"ubuntu-jammy-x86_64"}"
VM_LOGIN="${VM_LOGIN:-"ubuntu"}"
FIP_FILE="${ENTITIES_PREFIX}-demo-fip.txt"          # allocated floating IP is recorded here
SECGROUP_NAME="${ENTITIES_PREFIX}-demo-secgroup"
SSH_KEYPAIR_DIR="${HOME}/.ssh/${CLOUD_ENV_NAME}"
EXTRA_VOLUME_SIZE_GB=${EXTRA_VOLUME_SIZE_GB:-"10"}  # set to 0 to skip the extra volume
EXTRA_VOLUME_NAME="${ENTITIES_PREFIX}-demo-volume"
EXTRA_VOLUME_TYPE="${EXTRA_VOLUME_TYPE:-""}"        # empty -> cloud default volume type
SERVER_CREATE_ADDITIONAL_ARGS="${SERVER_CREATE_ADDITIONAL_ARGS:-""}"
SERVER_EPHEMERAL_DISK_SIZE="${SERVER_EPHEMERAL_DISK_SIZE:-"0"}"
# Per-project-type entity names (keys: personal|group).
# Personal projects reuse pre-created shared router/network/subnet entities;
# group projects create their own, named after ${ENTITIES_PREFIX}.
declare -A ROUTER_NAME_ARR
ROUTER_NAME_ARR[personal]="${ROUTER_NAME:-"internal-ipv4-general-private"}"
ROUTER_NAME_ARR[group]="${ROUTER_NAME:-"${ENTITIES_PREFIX}-demo-router"}"
declare -A NETWORK_NAME_ARR
NETWORK_NAME_ARR[personal]="${NETWORK_NAME:-"internal-ipv4-general-private"}"
NETWORK_NAME_ARR[group]="${NETWORK_NAME:-"${ENTITIES_PREFIX}-demo-network"}"
declare -A SUBNET_NAME_ARR
SUBNET_NAME_ARR[personal]="${SUBNET_NAME:-"internal-ipv4-general-private-172-16-0-0"}"
SUBNET_NAME_ARR[group]="${SUBNET_NAME:-"${ENTITIES_PREFIX}-demo-subnet"}"
#############################################################################
# functions
#############################################################################
# Shared helpers: log*, myexit, report_tools, is_personal_project,
# list_objects, delete_objects_{personal,group}_project, vm_wait_for_status,
# test_vm_access_ncat. Quote the path so spaces in SCRIPT_DIR do not break it.
source "${SCRIPT_DIR}/../../../../common/lib.sh.inc"
#############################################################################
# main steps
#############################################################################
log_section "Using commandline tools:"
report_tools || myexit 1
log_section "Using OpenStack cloud:"
# Fail early when credentials are not loaded / the API is unreachable.
${OPENSTACK_BIN} version show -fcsv | grep identity || myexit 1
# detect project type (group/personal) --------------------------------------
project_type=group
if prj_name=$(is_personal_project); then
  project_type=personal
fi
# Resolve entity names for the detected project type (see *_ARR maps above).
NETWORK_NAME="${NETWORK_NAME_ARR[${project_type}]}"
SUBNET_NAME="${SUBNET_NAME_ARR[${project_type}]}"
ROUTER_NAME="${ROUTER_NAME_ARR[${project_type}]}"
log "Using OpenStack ${project_type} project named: ${prj_name}"
# delete objects (from previous run) ----------------------------------------
log_section "Delete previously created objects (delete_objects_${project_type}_project)"
"delete_objects_${project_type}_project"
# ---------------------------------------------------------------------------
log_section "List currently allocated objects"
list_objects
# ---------------------------------------------------------------------------
log_section_keypress "Create (generate) locally SSH keypair, upload public SSH key to cloud"
# Private key stays local under ~/.ssh/<cloud>/ with owner-only permissions;
# only the public half is uploaded to the cloud.
mkdir -p "${SSH_KEYPAIR_DIR}"
chmod 700 "${SSH_KEYPAIR_DIR}"
# [[ ... && ... ]] instead of the deprecated/ambiguous '[ ... -a ... ]'.
if [[ -s "${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}" && -s "${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}.pub" ]]; then
  log "Reusing already existing SSH keypair at ${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}"
else
  ssh-keygen -t rsa -b 4096 -f "${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}"
fi
${OPENSTACK_BIN} keypair create --type ssh --public-key "${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}.pub" "${KEYPAIR_NAME}"
# Quote the directory prefix, keep the glob unquoted so it still expands.
ls -la "${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}"*
# ---------------------------------------------------------------------------
log_section_keypress "Create cloud security groups (custom VM firewall) to allow outgoing traffic and incoming SSH traffic on port 22"
${OPENSTACK_BIN} security group create --description "${ENTITIES_PREFIX} demo default security group" "${SECGROUP_NAME}"
${OPENSTACK_BIN} security group rule create --ingress --proto tcp --remote-ip 0.0.0.0/0 --dst-port 22 "${SECGROUP_NAME}"
${OPENSTACK_BIN} security group rule create --egress --proto tcp --remote-ip 0.0.0.0/0 --dst-port 1:65535 "${SECGROUP_NAME}"
# ---------------------------------------------------------------------------
if [ "${project_type}" == "group" ]; then
  log_section_keypress "Create cloud private network and subnet, so far isolated (CIDR:${SUBNET_CIDR})"
  ${OPENSTACK_BIN} network create "${NETWORK_NAME}"
  NETWORK_ID=$(${OPENSTACK_BIN} network show "${NETWORK_NAME}" -f value -c id)
  ${OPENSTACK_BIN} subnet create "${SUBNET_NAME}" --network "${NETWORK_ID}" --subnet-range "${SUBNET_CIDR}" --dns-nameserver 8.8.4.4 --dns-nameserver 8.8.8.8
else
  # Personal projects reuse the pre-created shared network/subnet.
  NETWORK_ID=$(${OPENSTACK_BIN} network show "${NETWORK_NAME}" -f value -c id)
  log_section_keypress "Re-use existing network (${NETWORK_NAME}) and subnet (${SUBNET_NAME})"
fi
# ---------------------------------------------------------------------------
if [ "${EXTRA_VOLUME_SIZE_GB}" -gt 0 ]; then
  log_keypress "Create cloud VM extra volume \"${EXTRA_VOLUME_NAME}\" with following configuration:\n" \
    " size: ${EXTRA_VOLUME_SIZE_GB} GB, volume type: ${EXTRA_VOLUME_TYPE}"
  # ${VAR:+...} expands --type only when a volume type was requested.
  ${OPENSTACK_BIN} volume create ${EXTRA_VOLUME_TYPE:+--type=${EXTRA_VOLUME_TYPE}} --size "${EXTRA_VOLUME_SIZE_GB}" "${EXTRA_VOLUME_NAME}"
fi
# ---------------------------------------------------------------------------
# [[ ... && ... ]] instead of the deprecated/ambiguous '[ ... -a ... ]'.
if [[ -n "${SERVER_EPHEMERAL_DISK_SIZE}" && "${SERVER_EPHEMERAL_DISK_SIZE}" -gt "0" ]]; then
  SERVER_CREATE_ADDITIONAL_ARGS="${SERVER_CREATE_ADDITIONAL_ARGS} --ephemeral=size=${SERVER_EPHEMERAL_DISK_SIZE}"
fi
log_section_keypress "Create cloud VM instance \"${SERVER_NAME}\" with following configuration:\n" \
  " flavor: ${FLAVOR_NAME}, image/os: ${IMAGE_NAME}, network: ${NETWORK_NAME}\n" \
  " keypair: ${KEYPAIR_NAME}, sec-group/firewall: ${SECGROUP_NAME})" \
  " additional arguments: ${SERVER_CREATE_ADDITIONAL_ARGS}"
# SERVER_CREATE_ADDITIONAL_ARGS is intentionally unquoted: it may carry
# several extra options that must undergo word-splitting.
${OPENSTACK_BIN} server create --flavor "${FLAVOR_NAME}" --image "${IMAGE_NAME}" \
  --network "${NETWORK_ID}" --key-name "${KEYPAIR_NAME}" \
  --security-group "${SECGROUP_NAME}" ${SERVER_CREATE_ADDITIONAL_ARGS} "${SERVER_NAME}"
SERVER_ID=$(${OPENSTACK_BIN} server show "${SERVER_NAME}" -f value -c id)
log_section "Wait for VM instance \"${SERVER_NAME}\" being ACTIVE"
vm_wait_for_status "${SERVER_NAME}" "ACTIVE"
if [ "${EXTRA_VOLUME_SIZE_GB}" -gt 0 ]; then
  log_section_keypress "Attach extra volume \"${EXTRA_VOLUME_NAME}\" (${EXTRA_VOLUME_SIZE_GB} GB) to VM \"${SERVER_NAME}\""
  # Quote entity names: they derive from user-controlled ENTITIES_PREFIX.
  ${OPENSTACK_BIN} server add volume "${SERVER_NAME}" "${EXTRA_VOLUME_NAME}" --device /dev/sdb
fi
# ---------------------------------------------------------------------------
if [ "${project_type}" == "group" ]; then
  log_section "Route VM from internal software defined networking outside"
  log_keypress " 1] Create route, associate router with external provider network and internal subnet (${SUBNET_CIDR})"
  ${OPENSTACK_BIN} router create "${ROUTER_NAME}"
  ${OPENSTACK_BIN} router set "${ROUTER_NAME}" --external-gateway "${EXTERNAL_NETWORK_NAME}"
  ${OPENSTACK_BIN} router add subnet "${ROUTER_NAME}" "${SUBNET_NAME}"
else
  log " 1] Reuse existing router ${ROUTER_NAME} (may not be visible from personal projects)"
fi
# ---------------------------------------------------------------------------
log_keypress " 2] Allocate single FIP (floating ip) from external provider network"
FIP=$(${OPENSTACK_BIN} floating ip create "${EXTERNAL_NETWORK_NAME}" -f value -c name)
if [ -n "${FIP}" ]; then
  # Persist the FIP so follow-up runs / users can find the address.
  echo "${FIP}" > "${FIP_FILE}"
  echo "Successfully obtained public ipv4 floating IP address (FIP): ${FIP}"
  log " 3] Associate selected FIP with created VM"
  ${OPENSTACK_BIN} server add floating ip "${SERVER_NAME}" "${FIP}"
  log_section "VM server instance access tests"
  log_keypress " 1] TCP ping (ncat -z ${FIP} 22)"
  test_vm_access_ncat "${FIP}"
  log_keypress " 2] SSH command (ssh -i ${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME} ${VM_LOGIN}@${FIP})"
  # Drop any stale known_hosts entry for a previously recycled FIP.
  ssh-keygen -R "${FIP}" &>/dev/null
  ssh -o StrictHostKeyChecking=no -i "${SSH_KEYPAIR_DIR}/id_rsa.${KEYPAIR_NAME}" "${VM_LOGIN}@${FIP}" 'echo "";uname -a;uptime; echo "VM access succeeded!"'
else
  log "Unable to allocate FIP address, VM is created but not accessible from internet."
fi
# ---------------------------------------------------------------------------
log_section_keypress "Object summary in profile ${ENTITIES_PREFIX}"
list_objects
# ---------------------------------------------------------------------------
log_section_keypress "Teardown of the objects (delete_objects_${project_type}_project)" \
  "(Interrupt with CTRL-C if you want to keep the created infrastructure and skip its destruction)"
"delete_objects_${project_type}_project"
# Terraform demonstration
This Terraform module is able to demonstrate creation of two most used cloud infrastructure patterns:
## Two tier infrastructure: public bastion and private VM farm
The infrastructure consists of:
- public facing small bastion VM (sometimes called as jump VM)
- private VM farm
![two-tier-infra.png](/clouds/common/pictures/two-tier-infra.png)
## Single tier infrastructure i.e. public facing VM farm
The infrastructure consists of:
- public facing VM farm
![single-tier-infra.png](/clouds/common/pictures/single-tier-infra.png)
## Cloud VM configuration during system boot using cloud-init
Terraform demonstrates [how to configure VM servers on creation with cloud-init](modules/common/nodes-cloudinit.txt):
- Add ssh keys, disable SSH password auth
- Create a partition and filesystem, and mount extra data from the extra volume
## Handling different project restrictions (quotas, shared networks, ...)
e-INFRA.CZ OpenStack cloud distinguishes between two project types: personal and group
([more on the topic](https://docs.e-infra.cz/compute/openstack/technical-reference/brno-site/get-access/#personal-project)).
Terraform demo code with additional variable file [`personal-projects.tfvars`](./personal-projects.tfvars) shows how to support both project types i.e. how to parametrize infrastructure definition with minimal code duplications.
There is thin terraform wrapper [`terraform.sh`](./terraform.sh) abstracting the fact which project type you are on. It is not necessary to use the [`terraform.sh`](./terraform.sh) wrapper when you keep in mind that additional terraform variables file [`personal-projects.tfvars`](./personal-projects.tfvars) has to be passed at plan step when you are on personal project.
## Using the terraform demo
1. Clone the repository.
1. Load your OpenStack application credentials into environment variables: `source project_openrc.sh.inc`
1. Override any infrastructure variables in [main.tf](main.tf) file if needed. Full set of variables can be found in [modules/common/variables.tf](modules/common/variables.tf).
1. In the [terraform root directory](/clouds/g2/ostrava/general/terraform) run following commands to initiate and validate environment
* `./terraform.sh validate-tools`
* `./terraform.sh detect-cloud`
* `./terraform.sh detect-project`
* `./terraform.sh init`
* `./terraform.sh validate`
1. In the [same directory](/clouds/g2/ostrava/general/terraform) run commands to deploy cloud infrastructure
* `./terraform.sh plan --out plan`
* `./terraform.sh apply plan`
1. Once you need to change the infrastructure, first modify the infrastructure declaration and repeat above steps to deploy changes.
1. Cloud resources can be deleted with `./terraform.sh destroy`.
> You may use terraform directly without the [`terraform.sh`](./terraform.sh) wrapper, but then keep in your mind that just for personal project type you should pass additional variable file [`personal-projects.tfvars`](./personal-projects.tfvars) at plan step (i.e. `terraform plan --out personal-project.plan --var-file=personal-projects.tfvars`)
Detailed terminal transcripts show how to run the terraform demo inside a personal project ([with](./terraform-2tier_public_bastion_private_vm_farm-personal-project-wrapper.log) / [without the wrapper](./terraform-2tier_public_bastion_private_vm_farm-personal-project-no-wrapper.log)) as well as under a group project ([with](./terraform-2tier_public_bastion_private_vm_farm-group-project-wrapper.log) / [without the wrapper](./terraform-2tier_public_bastion_private_vm_farm-group-project-no-wrapper.log)).
## Access to the VM nodes
In single tier infrastructure you access directly the individual VM nodes via SSH on public IP addresses.
Two tier infrastructure requires the access following way:
1. Establish the connection with bastion
```sh
sshuttle -r ubuntu@<bastion-ip>
```
1. Connect directly to VM nodes via SSH on private IP addresses:
```sh
ssh ubuntu@<vm-node-ip-from-10.10.10.0/24>
```
# Terraform settings: keep state in the local backend (terraform.tfstate
# next to this root module); no remote state storage or locking.
terraform {
  backend "local" {}
}
# Root module: selects the infrastructure pattern and forwards the
# project-type toggles declared as root variables.
module "toplevel" {
  # infrastructure type:
  # -------------------------------------------------------------------------
  # two tier infrastructure (2tier_public_bastion_private_vm_farm module):
  # * single public facing tiny bastion VM
  # * <nodes_count> private HPC VM farm
  source = "./modules/2tier_public_bastion_private_vm_farm"
  # single tier infrastructure (1tier_public_vm_farm module)
  # * <nodes_count> public HPC VM farm
  #source = "./modules/1tier_public_vm_farm"

  # Infrastructure sizing / naming.
  infra_name              = "general-tf-demo"
  nodes_count             = 5
  nodes_flavor            = "e1.small"
  nodes_image             = "ubuntu-jammy-x86_64"
  public_external_network = "external-ipv4-general-public"
  nodes_extra_volume_size = 10 # extra volume size in GB

  # OpenStack project type:
  # -------------------------------------------------------------------------
  # root variables wired 1:1 to "toplevel" module to be able to toggle between
  # group and personal project infrastructure (see personal-projects.tfvars)
  router_creation_enable           = var.router_creation_enable
  internal_network_creation_enable = var.internal_network_creation_enable
  internal_network_name            = var.internal_network_name
  internal_subnet_creation_enable  = var.internal_subnet_creation_enable
  internal_subnet_name             = var.internal_subnet_name
}
../common/instances.tf
\ No newline at end of file
../common/keypair.tf
\ No newline at end of file
../common/networks.tf
\ No newline at end of file
../common/nodes-cloudinit.txt
\ No newline at end of file
# Floating IPs
# Allocate one public floating IP per farm node from the external pool.
resource "openstack_networking_floatingip_v2" "nodes_fips" {
  count = var.nodes_count
  pool  = var.public_external_network
}

# Bind the i-th floating IP to the i-th VM instance.
resource "openstack_compute_floatingip_associate_v2" "nodes_fips_associations" {
  count       = var.nodes_count
  floating_ip = openstack_networking_floatingip_v2.nodes_fips[count.index].address
  instance_id = openstack_compute_instance_v2.nodes[count.index].id
}
../common/providers.tf
\ No newline at end of file
../common/secgroup_rules.tf
\ No newline at end of file
../common/variables.tf
\ No newline at end of file
../common/volumes.tf
\ No newline at end of file
# cloud-init user-data applied to VM nodes at first boot.
users:
  # keep the image's default user in addition to the explicit one below
  - default
  # 'ubuntu' account with bash shell and key-only SSH access
  - name: ubuntu
    shell: /bin/bash
    ssh_authorized_keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5fFLKPzxna7fq6kh1CHaIQFnpqXybqLqGs4ZpTDbIrEn7xjCsdyxMm9dcptyS0t6BzXO56BlJyYsR1GWo4rp3g8rMmb9u6/oHmMwgn7G/GLgsaAAO5XHW0A3UEJl3JHfCQLHkN1APQ4dy7gNTG24ahH/pcyr4rV0SsjPUCqFqkSMDZxRgfllNGftxWVHR2fYfPALLrGdhR/SjNSIs3pwBIUXaSfF3aBLsjeGBj4y5YsiR9yI3y2gUmpURROofTvtE7Fp8OIgmWCVqRe70CKDbl17HFbz3FIqYwZLAQHILcp1M45zV8koSOjW5+3C/ZJYzBKOnw/a/1Cw3uHFDrZfRqKLMP/gagnoEPRHjfmUsJ3UJO0eXDCXmnH7F48xBI76CgxYl039/SMmJ2mR0KqAHGnwqVmJI3yBGyK+Z4iEwk+JVDLEB14RHiMp2/I/tYpDWFE1IOigFFNLdfaZrVFY1/fD+yGGyFUO1Wo+CKb8tpndLB4H3Yj2MLRDP/aNpLC4M7Aru7hWnUF81aE/VUAqR6CP2vsHzlAOmH08pOlP9FVITinmJqzBL15l+W7q0Rhh4WBRO4ixlrtRJDNL2wm0vf+GiJnXligFtZ7Cw8bk/LcAe37WqcTl0xLKDyPSw4SvWOC2aE6BVuJjPAhoUUcBaNzoBa7lf4eb+FS4tquTZlQ== freznicek@LenovoThinkCentreE73
# disable SSH password authentication; keys only
ssh_pwauth: false
# Public-facing bastion (jump) VM for the two-tier infrastructure.
resource "openstack_compute_instance_v2" "bastion" {
  name            = "${var.infra_name}-${var.bastion_name}"
  image_name      = var.bastion_image
  flavor_name     = var.bastion_flavor
  key_pair        = openstack_compute_keypair_v2.pubkey.name
  security_groups = [openstack_networking_secgroup_v2.secgroup_default.name]
  # Prepend a #cloud-config hostname stanza to the shared bastion cloud-init.
  user_data = "#cloud-config\nhostname: ${var.infra_name}-${var.bastion_name}.local\n${file("${path.module}/bastion-cloudinit.txt")}"
  network {
    # Use the freshly created network for group projects, or the shared
    # pre-existing one for personal projects (toggled by the *_enable vars).
    uuid = var.internal_network_creation_enable ? openstack_networking_network_v2.network_default[0].id : data.openstack_networking_network_v2.internal_shared_personal_network[0].id
    # NOTE(review): both uuid and a pre-built port are given here; the port
    # below pins the subnet and security groups — confirm this matches the
    # provider's expectations for combined uuid+port network blocks.
    port = openstack_networking_port_v2.bastion_port.id
  }
}
# Floating IPs (only for bastion node)
resource "openstack_networking_floatingip_v2" "bastion_fip" {
  pool = var.public_external_network
}
resource "openstack_compute_floatingip_associate_v2" "bastion_fip_associate" {
  floating_ip = openstack_networking_floatingip_v2.bastion_fip.address
  instance_id = openstack_compute_instance_v2.bastion.id
}
# Ports
# Pre-created port so the bastion gets a fixed IP on the chosen subnet and
# the default security group, for both group and personal project layouts.
resource "openstack_networking_port_v2" "bastion_port" {
  name               = "${var.infra_name}-${var.bastion_name}-port"
  network_id         = var.internal_network_creation_enable ? openstack_networking_network_v2.network_default[0].id : data.openstack_networking_network_v2.internal_shared_personal_network[0].id
  admin_state_up     = "true"
  security_group_ids = [openstack_networking_secgroup_v2.secgroup_default.id]
  fixed_ip {
    subnet_id = var.internal_subnet_creation_enable ? openstack_networking_subnet_v2.subnet_default[0].id : data.openstack_networking_subnet_v2.internal_shared_personal_subnet[0].id
  }
}
../common/instances.tf
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment