Skip to content

Commit

Permalink
[WIP] ZTP Left shifting for the Telco QE KPI testing
Browse files Browse the repository at this point in the history
This commit is a PoC for now.

NOTE: wait job must be restored!!!!

Signed-off-by: Carlos Cardenosa <[email protected]>
  • Loading branch information
ccardenosa committed Jan 8, 2025
1 parent 51e766d commit b8d66cd
Show file tree
Hide file tree
Showing 13 changed files with 665 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
base_images:
  # Ansible-enabled OCP 4.17 image used by the test steps.
  base:
    name: "4.17"
    namespace: ocp
    tag: ansible
  cli:
    name: "4.17"
    namespace: ocp
    tag: cli
build_root:
  project_image:
    dockerfile_literal: |-
      FROM quay.io/centos/centos:stream9
      # NOTE(review): the oc client below is pinned to 4.11.9 while everything
      # else targets 4.17 — confirm this skew is intentional.
      RUN \
        dnf install -y git python3-pip iputils nmap-ncat && \
        dnf clean all && \
        curl -sSL -o openshift-client-linux.tar.gz https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/4.11.9/openshift-client-linux.tar.gz && \
        tar -zxvf openshift-client-linux.tar.gz -C /usr/local/bin oc kubectl && \
        rm -f openshift-client-linux.tar.gz && \
        pip3 install --no-cache-dir \
          "ansible==8.7.0" \
          "netaddr==1.3.0" \
        && \
        echo
releases:
  latest:
    release:
      architecture: amd64
      channel: candidate
      version: "4.17"
resources:
  '*':
    limits:
      memory: 4Gi
    requests:
      cpu: 100m
      memory: 200Mi
tests:
- as: telcov10n-sno-virtualised-hub-ztp-f360
  cluster: build05
  # NOTE(review): '* 1 * * *' fires every minute from 01:00 to 01:59 UTC.
  # If a single daily run was intended, this should be '0 1 * * *'.
  cron: '* 1 * * *'
  steps:
    env:
      AUX_HOST: helix92.telcoqe.eng.rdu2.dc.redhat.com
      MCH_NAMESPACE: open-cluster-management
      OCP_HUB_VERSION: "4.17"
      # Multiline operator list consumed by the install step; '#' lines inside
      # the literal are part of the value, not YAML comments.
      OPERATORS: |
        # - local-storage-operator
        - lvms-operator
        - openshift-gitops-operator
        - advanced-cluster-management
        - topology-aware-lifecycle-manager
        # - multicluster-engine
      SHARED_HUB_CLUSTER_PROFILE: ztp-hub-preserved-prod-cluster_profile_dir
    test:
    - ref: install-operators-verify
    - ref: telcov10n-sno-hub-ztp-verify-hub
    workflow: telcov10n-sno-virtualised-hub-ztp
zz_generated_metadata:
  branch: ztp-left-shifting-kpi
  org: openshift-kni
  repo: eco-ci-cd
  variant: sno-virtualised-hub-ztp-4.17-latest
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# Prowgen-generated periodic job wrapping the ci-operator target above.
periodics:
- agent: kubernetes
  cluster: build05
  # NOTE(review): fires every minute from 01:00 to 01:59 UTC; should match the
  # intent of the ci-operator config ('0 1 * * *' for one run per day).
  cron: '* 1 * * *'
  decorate: true
  decoration_config:
    skip_cloning: true
  extra_refs:
  - base_ref: ztp-left-shifting-kpi
    org: openshift-kni
    repo: eco-ci-cd
  labels:
    ci-operator.openshift.io/cluster: build05
    ci-operator.openshift.io/variant: sno-virtualised-hub-ztp-4.17-latest
    ci.openshift.io/generator: prowgen
    pj-rehearse.openshift.io/can-be-rehearsed: "true"
  name: periodic-ci-openshift-kni-eco-ci-cd-ztp-left-shifting-kpi-sno-virtualised-hub-ztp-4.17-latest-telcov10n-sno-virtualised-hub-ztp-f360
  spec:
    containers:
    - args:
      - --gcs-upload-secret=/secrets/gcs/service-account.json
      - --image-import-pull-secret=/etc/pull-secret/.dockerconfigjson
      - --report-credentials-file=/etc/report/credentials
      - --secret-dir=/secrets/ci-pull-credentials
      - --target=telcov10n-sno-virtualised-hub-ztp-f360
      - --variant=sno-virtualised-hub-ztp-4.17-latest
      command:
      - ci-operator
      image: ci-operator:latest
      imagePullPolicy: Always
      name: ""
      resources:
        requests:
          cpu: 10m
      volumeMounts:
      - mountPath: /secrets/ci-pull-credentials
        name: ci-pull-credentials
        readOnly: true
      - mountPath: /secrets/gcs
        name: gcs-credentials
        readOnly: true
      - mountPath: /secrets/manifest-tool
        name: manifest-tool-local-pusher
        readOnly: true
      - mountPath: /etc/pull-secret
        name: pull-secret
        readOnly: true
      - mountPath: /etc/report
        name: result-aggregator
        readOnly: true
    serviceAccountName: ci-operator
    volumes:
    - name: ci-pull-credentials
      secret:
        secretName: ci-pull-credentials
    - name: manifest-tool-local-pusher
      secret:
        secretName: manifest-tool-local-pusher
    - name: pull-secret
      secret:
        secretName: registry-pull-credentials
    - name: result-aggregator
      secret:
        secretName: result-aggregator
Original file line number Diff line number Diff line change
@@ -0,0 +1,261 @@
#!/bin/bash

set -o nounset
set -o errexit
set -o pipefail

echo "************ Fix container user ************"
# Fix user IDs in a container (CI images may run with an arbitrary UID).
# Use an explicit if/else: the original `[ -e ... ] && run || echo "not found"`
# also printed the misleading "not found" message (and swallowed the failure)
# whenever fix_uid.sh existed but exited non-zero.
if [ -e "${HOME}/fix_uid.sh" ]; then
  "${HOME}/fix_uid.sh"
else
  echo "${HOME}/fix_uid.sh was not found" >&2
fi

function load_env {

  # Export every value the generated Ansible inventory resolves via
  # lookup('ansible.builtin.env', ...). AUX_HOST comes from the step env.
  export BASTION_VHUB_HOST="${AUX_HOST}"

  # SSH private key for the bastion: decode the base64 mounted secret to a
  # file (redirect directly instead of the original `cat | base64 -d` pipe).
  export BASTION_VHUB_HOST_SSH_PRI_KEY_FILE="${PWD}/remote-hypervisor-ssh-b64-privkey"
  base64 -d \
    < /var/run/telcov10n-ztp-left-shifting/remote-hypervisor-ssh-b64-privkey \
    > "${BASTION_VHUB_HOST_SSH_PRI_KEY_FILE}"
  # Base64-encoded public key injected into the hub cluster nodes.
  export CLUSTER_SSH_PUB_KEY="c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFEbTloYjZpVFpKeXBFbXpnNElaNzY3emU2MFVHaEJXbmpQWGhvdldWQjd1S3B1dGRMelpobWxvMzZpZmtYci9EVGs4TkdtNDdyNmtYbXo5TkFGMHBESGE1alg2eUpGbmhTNHo1TlkvbXpzVVg0MWd3aXFCS1lIZ2RwL0tFMXlsRThtYk5vbjVacGFhR3ZiODc2bXlqalBqUHdXc0Q4aHZYWmlyQTVROFRmRGIvUHZneTFkaFZIL3VOMDVJcDF2VnNwK2JGR01QVUpWV1ZVeS9FYnk1eFc2T0p2K0ZCT1FxNG51NnRzbERabEhZWFgyVFNHcmxXNHgwaS9vUU1wS3UvWTh5Z0FkaldxbUF5NlVCY2hvMW5OV3kxNWNwMGpJNUZoanplMTcxdlNXWkxBcUpZK2VGY0wya3QvMDlSblkrTVh5WS90SWYrcU5NeUJFMlFsdGlnYWgK"

  # Pull secret (already base64) for the OpenShift installation.
  CLUSTER_B64_PULL_SECRET="$(cat /var/run/telcov10n-ztp-left-shifting/b64-pull-secret)"
  export CLUSTER_B64_PULL_SECRET

  # Root password applied to the hub VMs via kcli vmrules.
  VM_PASSWD="$(cat /var/run/telcov10n-ztp-left-shifting/cluster-admin-pass)"
  export VM_PASSWD

  # Static lab topology: bridge, MACs and IPs for the virtualised hub + spoke.
  export BASTION_VHUB_HOST_USER="telcov10n"
  export VM_BOOTSTRAP_IP="192.168.80.100"
  export VM_CONTROL_PLANE_0_IP="192.168.80.10"
  export NETWORK_NIC="eno12399"
  export NETWORK_BRIDGE_NAME="baremetal"
  export NETWORK_BRIDGE_GW_IP="192.168.80.1/22"
  export CLUSTER_NAME="vhub"
  export CLUSTER_ZTP_IN_PROW_POOL_PATH="/var/lib/libvirt/images"
  export CLUSTER_VERSION="stable"
  export CLUSTER_TAG="4.17"
  export CLUSTER_ZTP_IN_PROW_DOMAIN="ztp-left-shifting.kpi.telco.lab"
  export CLUSTER_ZTP_IN_PROW_API_IP="192.168.80.5"
  export CLUSTER_ZTP_IN_PROW_INGRESS_IP="192.168.80.6"
  export CLUSTER_BRIDGE_NIC_MAC_BOOTSTRAP="cc:a4:de:aa:aa:01"
  export CLUSTER_BRIDGE_NIC_MAC="cc:a4:de:aa:aa:10"
  export CLUSTER_SPOKE_NIC_MAC="b4:83:51:0f:92:b2"
  export BAREMETAL_SPOKE="192.168.80.20"
  export SOCKS5_PROXY_PORT="3124"
}

function generate_inventory_file {

  # Render the Ansible inventory used to drive the bastion hypervisor.
  # All values are resolved at playbook time from the environment exported by
  # load_env, so the inventory file itself contains no secrets.

  echo "************ telcov10n Generate Ansible inventory file to connect to Bastion host ************"

  load_env

  # Deliberately global: install_virtualised_hub_cluster reads it afterwards.
  inventory_file="${PWD}/bastion-vhub-node-inventory.yml"
  # Fixes from review: every env lookup previously spelled ' NETWORK_BRIDGE_NAME'
  # with a leading space (the lookup then resolves an empty value); the bridge
  # 'nic' was hard-coded to eno12399 instead of using the exported NETWORK_NIC;
  # the spoke dhcp-host reused CLUSTER_BRIDGE_NIC_MAC while CLUSTER_SPOKE_NIC_MAC
  # was exported but never used.
  cat <<EOF >| "${inventory_file}"
all:
  children:
    prow_bastion:
      hosts:
        bastion-vhub-node:
          ansible_host: "{{ lookup('ansible.builtin.env', 'BASTION_VHUB_HOST') }}"
          ansible_user: "{{ lookup('ansible.builtin.env', 'BASTION_VHUB_HOST_USER') }}"
          ansible_ssh_private_key_file: "{{ lookup('ansible.builtin.env', 'BASTION_VHUB_HOST_SSH_PRI_KEY_FILE') }}"
          ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
          # kcli_wrp_install_depencencies: true
          # kcli_wrp_oc:
          #   url: https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz
          #   dest: /usr/local/bin
          # kcli_wrp_libvirt:
          #   pool:
          #     name: "{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}"
          #     path: "{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_POOL_PATH') }}"
          kcli_wrp:
            networks:
              - name: "{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}"
                bridge: true
                bridgename: "{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}"
                nic: "{{ lookup('ansible.builtin.env', 'NETWORK_NIC') }}"
                bridge_ip: "{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_GW_IP') }}"
            clusters:
              - type: openshift
                force_installation: true
                parameters:
                  cluster: "{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}"
                  version: "{{ lookup('ansible.builtin.env', 'CLUSTER_VERSION') }}"
                  tag: "{{ lookup('ansible.builtin.env', 'CLUSTER_TAG') }}"
                  domain: "{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}"
                  # NOTE(review): kcli_wrp_libvirt is commented out above, so this
                  # reference only resolves if the role supplies a default — verify.
                  pool: "{{kcli_wrp_libvirt.pool.name}}"
                  nets:
                    - "{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}"
                  keys:
                    - "{{ lookup('ansible.builtin.env', 'CLUSTER_SSH_PUB_KEY') }}"
                  ctlplanes: 1
                  workers: 0
                  memory: 96000
                  numcpus: 48
                  disks:
                    - 200
                    - 100
                    - 100
                    - 100
                    - 100
                  base64_pull_secret: "{{ lookup('ansible.builtin.env', 'CLUSTER_B64_PULL_SECRET') }}"
                  api_ip: "{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_API_IP') }}"
                  ingress_ip: "{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_INGRESS_IP') }}"
                  apps:
                    - local-storage-operator
                    - openshift-gitops-operator
                    - advanced-cluster-management
                    - topology-aware-lifecycle-manager
                    - multicluster-engine
                  vmrules:
                    - vhub-bootstrap:
                        rootpassword: "{{ lookup('ansible.builtin.env', 'VM_PASSWD') }}"
                        nets:
                          - name: "{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}"
                            mac: "{{ lookup('ansible.builtin.env', 'CLUSTER_BRIDGE_NIC_MAC_BOOTSTRAP') }}"
                    - vhub-ctlplane-0:
                        rootpassword: "{{ lookup('ansible.builtin.env', 'VM_PASSWD') }}"
                        nets:
                          - name: "{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}"
                            mac: "{{ lookup('ansible.builtin.env', 'CLUSTER_BRIDGE_NIC_MAC') }}"
          kcli_wrp_credentials:
            clusters_details: ~/.kcli/clusters
            kubeconfig: auth/kubeconfig
            kubeadmin_password: auth/kubeadmin-password
          kcli_wrp_dnsmasq:
            use_nm_plugin: true
            drop_in_files:
              - path: /etc/NetworkManager/dnsmasq.d/70-{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}.conf
                content: |
                  # /etc/NetworkManager/dnsmasq.d/70-{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}.conf
                  log-dhcp
                  log-queries
                  # log-facility=/var/log/dnsmasq.log
                  strict-order
                  server=10.11.5.160
                  server=10.2.70.215
                  # except-interface=lo # <--- To check local resolves
                  interface="{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}"
                  listen-address=127.0.0.1,{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_GW_IP') | ansible.utils.ipaddr }}
                  dhcp-range={{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_GW_IP') | ansible.utils.ipaddr('network') }},static
                  dhcp-no-override
                  # dhcp-authoritative <---- No needed
                  # dhcp-lease-max=253
                  # Bridge setup:
                  # dhcp-option=121,{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_GW_IP') | ansible.utils.ipaddr('subnet') }}
                  dhcp-host={{ lookup('ansible.builtin.env', 'CLUSTER_BRIDGE_NIC_MAC_BOOTSTRAP') }},{{ lookup('ansible.builtin.env', 'VM_BOOTSTRAP_IP') }},{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}-bootstrap.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }},set:{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}
                  # Virtualised SNO Hub main NIC
                  dhcp-host={{ lookup('ansible.builtin.env', 'CLUSTER_BRIDGE_NIC_MAC') }},{{ lookup('ansible.builtin.env', 'VM_CONTROL_PLANE_0_IP') }},{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}-ctlplane-0.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }},set:{{ lookup('ansible.builtin.env', 'NETWORK_BRIDGE_NAME') }}
                  # SNO Spoke main NIC (was CLUSTER_BRIDGE_NIC_MAC, which duplicated
                  # the hub ctlplane entry and left CLUSTER_SPOKE_NIC_MAC unused)
                  dhcp-host={{ lookup('ansible.builtin.env', 'CLUSTER_SPOKE_NIC_MAC') }},{{ lookup('ansible.builtin.env', 'BAREMETAL_SPOKE') }}
                  # This file sets up the local OCP cluster domain and defines some aliases and a wildcard.
                  local=/{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}/
                  # OCP cluster API
                  address=/api.{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}/{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_API_IP') }}
                  address=/api-int.{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}/{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_API_IP') }}
                  # OCP cluster INGRESS
                  address=/.apps.{{ lookup('ansible.builtin.env', 'CLUSTER_NAME') }}.{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_DOMAIN') }}/{{ lookup('ansible.builtin.env', 'CLUSTER_ZTP_IN_PROW_INGRESS_IP') }}
          kcli_wrp_firewalld:
            zone_files:
              - path: /etc/firewalld/zones/public.xml
                content: |
                  <?xml version="1.0" encoding="utf-8"?>
                  <zone>
                    <short>Public</short>
                    <description>For use in public areas. You do not trust the other computers on networks to not harm your computer. Only selected incoming connections are accepted.</description>
                    <service name="ssh"/>
                    <service name="dhcpv6-client"/>
                    <service name="cockpit"/>
                    <service name="dhcp"/>
                    <service name="dns"/>
                    <service name="https"/>
                    <port port="{{ lookup('ansible.builtin.env', 'SOCKS5_PROXY_PORT') }}" protocol="tcp"/>
                    <masquerade/>
                    <forward/>
                  </zone>
          kcli_wrp_socks5_proxy:
            description: SOCKS5 Proxy Server for ZTP Left shifting
            username: "{{ lookup('ansible.builtin.env', 'BASTION_VHUB_HOST_USER') }}"
            host: "{{ lookup('ansible.builtin.env', 'BASTION_VHUB_HOST') }}"
            listen_port: "{{ lookup('ansible.builtin.env', 'SOCKS5_PROXY_PORT') }}"
            ssh_options: "-4"
EOF
}

function install_virtualised_hub_cluster {

  # Run the deployment playbook against the bastion and sanity-check the
  # resulting kubeconfig. ${inventory_file} is a global written by
  # generate_inventory_file; with 'set -u' this aborts if it was skipped.
  # cat "${inventory_file}"
  ansible-playbook -i "${inventory_file}" playbooks/deploy-virtualised-hub.yml -vvv

  ls -l "/tmp/kubeconfig-${CLUSTER_NAME}-via-socks5-proxy/"
  grep -HiIn 'server:\|proxy-url:' "/tmp/kubeconfig-${CLUSTER_NAME}-via-socks5-proxy"/*
  # mv -v "/tmp/kubeconfig-${CLUSTER_NAME}-via-socks5-proxy" "${SHARED_DIR}/"
  # NOTE(review): the two commands above treat this path as a directory, but
  # --kubeconfig expects a file — confirm what the playbook actually creates.
  oc --kubeconfig "/tmp/kubeconfig-${CLUSTER_NAME}-via-socks5-proxy" get nodes -owide
}

# Install the Ansible collections pinned by the repository's requirements.yml.
install_ansible_collections() {
  ansible-galaxy collection install -r requirements.yml
}

function main {

  # Print tool/workspace diagnostics first (useful in CI logs), then run the
  # three deployment stages in order.
  ansible --version
  echo
  pwd
  echo
  ls -lR
  echo

  # Fixed typo in the log message ("Runing" -> "Running").
  echo "Running Prow script..."

  install_ansible_collections
  generate_inventory_file
  install_virtualised_hub_cluster
}

function pr_debug_mode_waiting {

  # Debug hook for pull-request rehearsals: keep the pod alive until either
  # /tmp/debug.done is created from a pod terminal or TIMEOUT elapses.
  # NOTE(review): TIMEOUT is expected from the Prow step environment; under
  # 'set -u' an unset TIMEOUT aborts this trap — confirm it is always exported.

  echo "################################################################################"
  # Default protects the trap from 'set -u' when PULL_NUMBER is not exported
  # (e.g. on periodic runs, which have no associated pull request).
  echo "# Using pull request ${PULL_NUMBER:-<unset>}. Entering in the debug mode waiting..."
  echo "################################################################################"

  # Export so the 'date' child processes below actually parse/render in UTC;
  # a plain 'TZ=UTC' assignment would not reach them.
  export TZ=UTC
  END_TIME=$(date -d "${TIMEOUT}" +%s)
  debug_done=/tmp/debug.done

  # Poll once a minute; the first check happens only after the first sleep.
  while sleep 1m; do

    test -f "${debug_done}" && break
    echo
    echo "-------------------------------------------------------------------"
    echo "'${debug_done}' not found. Debugging can continue... "
    now=$(date +%s)
    if [ "${END_TIME}" -lt "${now}" ] ; then
      echo "Time out reached. Exiting by timeout..."
      break
    else
      echo "Now: $(date -d "@${now}")"
      echo "Timeout: $(date -d "@${END_TIME}")"
    fi
    echo "Note: To exit from debug mode before the timeout is reached,"
    echo "just run the following command from the POD Terminal:"
    echo "$ touch ${debug_done}"

  done

  echo
  echo "Exiting from Pull Request debug mode..."
}

# NOTE(review): WIP debug hook — this EXIT trap makes EVERY run (success or
# failure) block in pr_debug_mode_waiting until the timeout or a manual
# 'touch /tmp/debug.done'. Per the commit message, the normal wait job must be
# restored (i.e. this trap removed) before this leaves PoC status.
trap pr_debug_mode_waiting EXIT
main
Loading

0 comments on commit b8d66cd

Please sign in to comment.