[WIP] Set up ci-entrypoint to work with AKS management cluster #5241

Open · wants to merge 13 commits into base: main
31 changes: 23 additions & 8 deletions Makefile
@@ -310,12 +310,16 @@ verify-codespell: codespell ## Verify codespell.
##@ Development:

.PHONY: install-tools # populate hack/tools/bin
install-tools: $(ENVSUBST) $(KUSTOMIZE) $(KUBECTL) $(HELM) $(GINKGO) $(KIND) $(AZWI)
install-tools: $(ENVSUBST) $(KUSTOMIZE) $(KUBECTL) $(HELM) $(GINKGO) $(KIND) $(AZWI) $(YQ)

.PHONY: create-management-cluster
create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create a management cluster.
# Create kind management cluster.
$(MAKE) kind-create
# Create management cluster.
if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then \
$(MAKE) kind-create ; \
else \
$(MAKE) aks-create ; \
fi
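# Usage sketch (not part of this diff): the same toggle is expected to select the management cluster flavor, e.g.
#   USE_AKS_MANAGEMENT_CLUSTER=true make create-management-cluster   # provision an AKS management cluster
#   make create-management-cluster                                   # default: kind management cluster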

# Install cert manager and wait for availability
./hack/install-cert-manager.sh
@@ -331,7 +335,9 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create
timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.2.5/addon-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"

# Deploy CAPZ
$(KIND) load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=$(KIND_CLUSTER_NAME)
@if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then \
$(KIND) load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=$(KIND_CLUSTER_NAME) ; \
fi
timeout --foreground 300 bash -c "until $(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f - --server-side=true; do sleep 5; done"

# Wait for CAPI deployments
@@ -361,16 +367,16 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create

.PHONY: create-workload-cluster
create-workload-cluster: $(ENVSUBST) $(KUBECTL) ## Create a workload cluster.
# Create workload Cluster.
@if [ -z "${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY}" ]; then \
export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY=$(shell cat $(AZURE_IDENTITY_ID_FILEPATH)); \
fi; \
# TODO: change this so it doesn't source aks-mgmt-vars.env when it is using a kind cluster
if [ -f "$(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE)" ]; then \
timeout --foreground 300 bash -c "until $(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | $(KUBECTL) apply -f -; do sleep 5; done"; \
timeout --foreground 300 bash -c "source aks-mgmt-vars.env && env && until $(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | $(KUBECTL) apply -f -; do sleep 5; done"; \
elif [ -f "$(CLUSTER_TEMPLATE)" ]; then \
timeout --foreground 300 bash -c "until $(ENVSUBST) < "$(CLUSTER_TEMPLATE)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
timeout --foreground 300 bash -c "source aks-mgmt-vars.env && env && until $(ENVSUBST) < "$(CLUSTER_TEMPLATE)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
else \
timeout --foreground 300 bash -c "until curl --retry "$(CURL_RETRIES)" "$(CLUSTER_TEMPLATE)" | "$(ENVSUBST)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
timeout --foreground 300 bash -c "source aks-mgmt-vars.env && env && until curl --retry "$(CURL_RETRIES)" "$(CLUSTER_TEMPLATE)" | "$(ENVSUBST)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
fi
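# One possible shape for the TODO above (sketch, assuming aks-mgmt-vars.env is only written in AKS mode):
#   if [ -n "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then source aks-mgmt-vars.env; fi && \
#   until $(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | $(KUBECTL) apply -f -; do sleep 5; done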

# Wait for the kubeconfig to become available.
@@ -379,6 +385,8 @@ create-workload-cluster: $(ENVSUBST) $(KUBECTL) ## Create a workload cluster.
$(KUBECTL) get secret/$(CLUSTER_NAME)-kubeconfig -n default -o json | jq -r .data.value | base64 --decode > ./kubeconfig
$(KUBECTL) -n default wait --for=condition=Ready --timeout=10m cluster "$(CLUSTER_NAME)"

./scripts/peer-vnets.sh

@echo 'run "$(KUBECTL) --kubeconfig=./kubeconfig ..." to work with the new target cluster'

.PHONY: create-cluster
@@ -748,6 +756,13 @@ aks-create: $(KUBECTL) ## Create aks cluster as mgmt cluster.

.PHONY: tilt-up
tilt-up: install-tools ## Start tilt and build kind cluster if needed.
# Create management cluster.
if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then \
$(MAKE) kind-create ; \
else \
$(MAKE) aks-create ; \
fi

@if [ -z "${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY}" ]; then \
export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY=$(shell cat $(AZURE_IDENTITY_ID_FILEPATH)); \
fi; \
6 changes: 5 additions & 1 deletion hack/create-dev-cluster.sh
@@ -64,7 +64,11 @@ export ASO_CREDENTIAL_SECRET_NAME=${ASO_CREDENTIAL_SECRET_NAME:="aso-credentials
capz::util::generate_ssh_key

echo "================ DOCKER BUILD ==============="
PULL_POLICY=IfNotPresent make modules docker-build
PULL_POLICY=IfNotPresent make modules docker-build docker-push
# TODO: add a check for AKS vs kind
if [ -n "${USE_AKS_MANAGEMENT_CLUSTER:-}" ]; then
PULL_POLICY=IfNotPresent make docker-push
fi

setup() {
echo "================ MAKE CLEAN ==============="
101 changes: 91 additions & 10 deletions scripts/aks-as-mgmt.sh
@@ -18,6 +18,8 @@ set -o nounset # exit when script tries to use undeclared variables.
set -o pipefail # make the pipeline fail if any command in it fails.

REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# shellcheck source=hack/common-vars.sh
source "${REPO_ROOT}/hack/common-vars.sh"
# shellcheck source=hack/ensure-azcli.sh
source "${REPO_ROOT}/hack/ensure-azcli.sh" # install az cli and login using WI
# shellcheck source=hack/ensure-tags.sh
@@ -30,7 +32,7 @@ make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${AZWI##*/}"
export MGMT_CLUSTER_NAME="${MGMT_CLUSTER_NAME:-aks-mgmt-capz-${RANDOM_SUFFIX}}" # management cluster name
export AKS_RESOURCE_GROUP="${AKS_RESOURCE_GROUP:-aks-mgmt-capz-${RANDOM_SUFFIX}}" # resource group name
export AKS_NODE_RESOURCE_GROUP="node-${AKS_RESOURCE_GROUP}"
export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.30.2}"
export AKS_MGMT_KUBERNETES_VERSION="${AKS_MGMT_KUBERNETES_VERSION:-v1.30.2}"
export AZURE_LOCATION="${AZURE_LOCATION:-westus2}"
export AKS_NODE_VM_SIZE="${AKS_NODE_VM_SIZE:-"Standard_B2s"}"
export AKS_NODE_COUNT="${AKS_NODE_COUNT:-1}"
@@ -42,6 +44,12 @@ export AZWI_STORAGE_CONTAINER="\$web"
export SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH="${SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH:-}"
export SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH="${SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH:-}"
export REGISTRY="${REGISTRY:-}"
export AKS_MGMT_VNET_NAME="${AKS_MGMT_VNET_NAME:-"aks-mgmt-vnet-${RANDOM_SUFFIX}"}"
export AKS_MGMT_VNET_CIDR="${AKS_MGMT_VNET_CIDR:-"20.255.0.0/16"}"
export AKS_MGMT_SERVICE_CIDR="${AKS_MGMT_SERVICE_CIDR:-"20.255.254.0/24"}"
export AKS_MGMT_DNS_SERVICE_IP="${AKS_MGMT_DNS_SERVICE_IP:-"20.255.254.100"}"
export AKS_MGMT_SUBNET_NAME="${AKS_MGMT_SUBNET_NAME:-"aks-mgmt-subnet-${RANDOM_SUFFIX}"}"
export AKS_MGMT_SUBNET_CIDR="${AKS_MGMT_SUBNET_CIDR:-"20.255.0.0/24"}"
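# The networking defaults above are overridable from the environment; AKS requires the DNS service IP
# to fall inside the service CIDR. A sketch with alternate ranges mirroring the default layout (values are examples):
#   export AKS_MGMT_VNET_CIDR="10.10.0.0/16" AKS_MGMT_SUBNET_CIDR="10.10.0.0/24"
#   export AKS_MGMT_SERVICE_CIDR="10.10.254.0/24" AKS_MGMT_DNS_SERVICE_IP="10.10.254.100"
#   ./scripts/aks-as-mgmt.sh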

export AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID:-}"
export AZURE_CLIENT_ID="${AZURE_CLIENT_ID:-}"
@@ -63,7 +71,7 @@ main() {
echo "MGMT_CLUSTER_NAME: $MGMT_CLUSTER_NAME"
echo "AKS_RESOURCE_GROUP: $AKS_RESOURCE_GROUP"
echo "AKS_NODE_RESOURCE_GROUP: $AKS_NODE_RESOURCE_GROUP"
echo "KUBERNETES_VERSION: $KUBERNETES_VERSION"
echo "AKS_MGMT_KUBERNETES_VERSION: $AKS_MGMT_KUBERNETES_VERSION"
echo "AZURE_LOCATION: $AZURE_LOCATION"
echo "AKS_NODE_VM_SIZE: $AKS_NODE_VM_SIZE"
echo "AZURE_NODE_MACHINE_TYPE: $AZURE_NODE_MACHINE_TYPE"
@@ -76,6 +84,12 @@ main() {
echo "SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH: $SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH"
echo "REGISTRY: $REGISTRY"
echo "APISERVER_LB_DNS_SUFFIX: $APISERVER_LB_DNS_SUFFIX"
echo "AKS_MGMT_VNET_NAME: $AKS_MGMT_VNET_NAME"
echo "AKS_MGMT_VNET_CIDR: $AKS_MGMT_VNET_CIDR"
echo "AKS_MGMT_SERVICE_CIDR: $AKS_MGMT_SERVICE_CIDR"
echo "AKS_MGMT_DNS_SERVICE_IP: $AKS_MGMT_DNS_SERVICE_IP"
echo "AKS_MGMT_SUBNET_NAME: $AKS_MGMT_SUBNET_NAME"
echo "AKS_MGMT_SUBNET_CIDR: $AKS_MGMT_SUBNET_CIDR"

echo "AZURE_SUBSCRIPTION_ID: $AZURE_SUBSCRIPTION_ID"
echo "AZURE_CLIENT_ID: $AZURE_CLIENT_ID"
@@ -102,6 +116,17 @@ create_aks_cluster() {
--location "${AZURE_LOCATION}" \
--output none --only-show-errors \
--tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}"


echo "creating vnet for the resource group ${AKS_RESOURCE_GROUP}"
az network vnet create \
--resource-group "${AKS_RESOURCE_GROUP}"\
--name "${AKS_MGMT_VNET_NAME}" \
--address-prefix "${AKS_MGMT_VNET_CIDR}" \
--subnet-name "${AKS_MGMT_SUBNET_NAME}" \
--subnet-prefix "${AKS_MGMT_SUBNET_CIDR}" \
--output none --only-show-errors \
--tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}"
fi
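# Optional verification sketch (not executed by this script): confirm the vnet and subnet provisioned.
#   az network vnet show --resource-group "${AKS_RESOURCE_GROUP}" --name "${AKS_MGMT_VNET_NAME}" --query provisioningState -o tsv
#   az network vnet subnet show --resource-group "${AKS_RESOURCE_GROUP}" --vnet-name "${AKS_MGMT_VNET_NAME}" --name "${AKS_MGMT_SUBNET_NAME}" --query provisioningState -o tsv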

aks_exists=$(az aks show --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" 2>&1 || true) # true because we want to continue if the command fails
@@ -110,13 +135,16 @@
az aks create --name "${MGMT_CLUSTER_NAME}" \
--resource-group "${AKS_RESOURCE_GROUP}" \
--location "${AZURE_LOCATION}" \
--kubernetes-version "${KUBERNETES_VERSION}" \
--kubernetes-version "${AKS_MGMT_KUBERNETES_VERSION}" \
--node-count "${AKS_NODE_COUNT}" \
--node-vm-size "${AKS_NODE_VM_SIZE}" \
--node-resource-group "${AKS_NODE_RESOURCE_GROUP}" \
--vm-set-type VirtualMachineScaleSets \
--generate-ssh-keys \
--network-plugin azure \
--vnet-subnet-id "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AKS_RESOURCE_GROUP}/providers/Microsoft.Network/virtualNetworks/${AKS_MGMT_VNET_NAME}/subnets/${AKS_MGMT_SUBNET_NAME}" \
--service-cidr "${AKS_MGMT_SERVICE_CIDR}" \
--dns-service-ip "${AKS_MGMT_DNS_SERVICE_IP}" \
--tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" \
--output none --only-show-errors;
elif echo "$aks_exists" | grep -q "${MGMT_CLUSTER_NAME}"; then
@@ -127,6 +155,7 @@ create_aks_cluster() {
fi

# check and save kubeconfig
echo -e "\n"
echo "saving credentials of cluster ${MGMT_CLUSTER_NAME} in ${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}"
az aks get-credentials --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" \
--file "${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" --only-show-errors
@@ -172,11 +201,63 @@ create_aks_cluster() {
sleep 5
done

# If the storage account variable is set:
if [ -n "${AZURE_STORAGE_ACCOUNT}" ]; then
echo "assigning storage blob data reader role to the service principal"
until az role assignment create --assignee-object-id "${AKS_MI_OBJECT_ID}" --role "Storage Blob Data Reader" \
--scope "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AZURE_STORAGE_ACCOUNT_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${AZURE_STORAGE_ACCOUNT}/blobServices/default/containers/${AZURE_BLOB_CONTAINER_NAME}" \
--assignee-principal-type ServicePrincipal; do
echo "retrying to assign storage blob data reader role to the service principal"
sleep 5
done
fi

if [[ "${REGISTRY:-}" =~ \.azurecr\.io ]]; then
# If we are using the prow Azure Container Registry, log in.
acrname="${REGISTRY%%.*}"
az acr login --name "$acrname"
echo "assigning azure container registry contributor role to the service principal"
until az role assignment create --assignee-object-id "${AKS_MI_OBJECT_ID}" --role "Contributor" \
--scope "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AZURE_CONTAINER_REGISTRY_RESOURCE_GROUP}/providers/Microsoft.ContainerRegistry/registries/$acrname" \
--assignee-principal-type ServicePrincipal; do
echo "retrying to assign azure container registry contributor role to the service principal"
sleep 5
done
fi

echo "using ASO_CREDENTIAL_SECRET_MODE as podidentity"
ASO_CREDENTIAL_SECRET_MODE="podidentity"
}

set_env_varaibles(){
rm aks-mgmt-vars.env || true
cat <<EOF > aks-mgmt-vars.env
export MGMT_CLUSTER_NAME="${MGMT_CLUSTER_NAME}"
export AKS_RESOURCE_GROUP="${AKS_RESOURCE_GROUP}"
export AKS_NODE_RESOURCE_GROUP="${AKS_NODE_RESOURCE_GROUP}"
export MGMT_CLUSTER_KUBECONFIG="${MGMT_CLUSTER_KUBECONFIG}"
export AKS_MI_CLIENT_ID="${AKS_MI_CLIENT_ID}"
export AZURE_CLIENT_ID="${AKS_MI_CLIENT_ID}"
export AKS_MI_OBJECT_ID="${AKS_MI_OBJECT_ID}"
export AKS_MI_RESOURCE_ID="${AKS_MI_RESOURCE_ID}"
export MANAGED_IDENTITY_NAME="${MANAGED_IDENTITY_NAME}"
export MANAGED_IDENTITY_RG="${MANAGED_IDENTITY_RG}"
export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY="${AKS_MI_CLIENT_ID}"
export CI_RG="${MANAGED_IDENTITY_RG}"
export USER_IDENTITY="${MANAGED_IDENTITY_NAME}"
export CLUSTER_IDENTITY_TYPE="UserAssignedMSI"
export ASO_CREDENTIAL_SECRET_MODE="${ASO_CREDENTIAL_SECRET_MODE}"
export REGISTRY="${REGISTRY}"
export APISERVER_LB_DNS_SUFFIX="${APISERVER_LB_DNS_SUFFIX}"
export AZURE_LOCATION="${AZURE_LOCATION}"
export AKS_MGMT_VNET_NAME="${AKS_MGMT_VNET_NAME}"
export AKS_MGMT_VNET_CIDR="${AKS_MGMT_VNET_CIDR}"
export AKS_MGMT_SERVICE_CIDR="${AKS_MGMT_SERVICE_CIDR}"
export AKS_MGMT_DNS_SERVICE_IP="${AKS_MGMT_DNS_SERVICE_IP}"
export AKS_MGMT_SUBNET_NAME="${AKS_MGMT_SUBNET_NAME}"
export AKS_MGMT_SUBNET_CIDR="${AKS_MGMT_SUBNET_CIDR}"
EOF
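# Consumers are expected to source this file before applying cluster templates (see the
# create-workload-cluster changes in the Makefile), e.g.:
#   source aks-mgmt-vars.env && env | grep AKS_MGMT_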

cat <<EOF > tilt-settings-temp.yaml
kustomize_substitutions:
MGMT_CLUSTER_NAME: "${MGMT_CLUSTER_NAME}"
@@ -210,28 +291,28 @@ else
fi

# copy over the existing allowed_contexts to tilt-settings.yaml if it does not exist
allowed_contexts_exists=$(yq eval '.allowed_contexts' tilt-settings.yaml)
allowed_contexts_exists=$(${YQ} eval '.allowed_contexts' tilt-settings.yaml)
if [ "$allowed_contexts_exists" == "null" ]; then
yq eval '.allowed_contexts = load("tilt-settings-temp.yaml") | .allowed_contexts' tilt-settings-temp.yaml > tilt-settings.yaml
${YQ} eval '.allowed_contexts = load("tilt-settings-temp.yaml") | .allowed_contexts' tilt-settings-temp.yaml > tilt-settings.yaml
fi

# extract allowed_contexts from tilt-settings.yaml
current_contexts=$(yq eval '.allowed_contexts' tilt-settings.yaml | sort -u)
current_contexts=$(${YQ} eval '.allowed_contexts' tilt-settings.yaml | sort -u)

# extract allowed_contexts from tilt-settings-new.yaml
new_contexts=$(yq eval '.allowed_contexts' tilt-settings-temp.yaml | sort -u)
new_contexts=$(${YQ} eval '.allowed_contexts' tilt-settings-temp.yaml | sort -u)

# combine current and new contexts, keeping the union of both
combined_contexts=$(echo "$current_contexts"$'\n'"$new_contexts" | sort -u)

# create a temporary file since env($combined_contexts) is not supported in yq
# create a temporary file since env($combined_contexts) is not supported in ${YQ}
echo "$combined_contexts" > combined_contexts.yaml

# update allowed_contexts in tilt-settings.yaml with the combined contexts
yq eval --inplace ".allowed_contexts = load(\"combined_contexts.yaml\")" tilt-settings.yaml
${YQ} eval --inplace ".allowed_contexts = load(\"combined_contexts.yaml\")" tilt-settings.yaml

# merge the updated kustomize_substitution and azure_location with the existing one in tilt-settings.yaml
yq eval-all 'select(fileIndex == 0) *+ {"kustomize_substitutions": select(fileIndex == 1).kustomize_substitutions, "azure_location": select(fileIndex == 1).azure_location}' tilt-settings.yaml tilt-settings-temp.yaml > tilt-settings-new.yaml
${YQ} eval-all 'select(fileIndex == 0) *+ {"kustomize_substitutions": select(fileIndex == 1).kustomize_substitutions, "azure_location": select(fileIndex == 1).azure_location}' tilt-settings.yaml tilt-settings-temp.yaml > tilt-settings-new.yaml

mv tilt-settings-new.yaml tilt-settings.yaml
rm -r combined_contexts.yaml
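# Net effect (sketch): tilt-settings.yaml ends up with the union of allowed_contexts plus the
# kustomize_substitutions and azure_location produced by this script; inspect with, e.g.:
#   ${YQ} eval '.allowed_contexts, .azure_location' tilt-settings.yaml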
2 changes: 1 addition & 1 deletion scripts/ci-build-azure-ccm.sh
@@ -70,7 +70,7 @@ main() {
echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container"
az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login > /dev/null
# if the storage account has public access disabled at the account level this will return 404
AZURE_STORAGE_AUTH_MODE=login az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
# AZURE_STORAGE_AUTH_MODE=login az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
fi

az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" --auth-mode login
2 changes: 1 addition & 1 deletion scripts/ci-build-kubernetes.sh
@@ -80,7 +80,7 @@ main() {
if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv --auth-mode login)" == "false" ]]; then
echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container"
az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login > /dev/null
az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login --public-access container > /dev/null
# az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login --public-access container > /dev/null
fi

if [[ "${KUBE_BUILD_CONFORMANCE:-}" =~ [yY] ]]; then
15 changes: 10 additions & 5 deletions scripts/ci-entrypoint.sh
@@ -133,11 +133,16 @@ select_cluster_template() {

create_cluster() {
"${REPO_ROOT}/hack/create-dev-cluster.sh"
if [ ! -f "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" ]; then
echo "Unable to find kubeconfig for kind mgmt cluster ${KIND_CLUSTER_NAME}"
exit 1
fi
"${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get clusters -A
if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then
if [ ! -f "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" ]; then
echo "Unable to find kubeconfig for kind mgmt cluster ${KIND_CLUSTER_NAME}"
exit 1
fi
"${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get clusters -A
else
"${KUBECTL}" get clusters -A
fi;
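# Assumption behind the else branch (sketch): in AKS mode the kubeconfig written by
# scripts/aks-as-mgmt.sh is already the active context for ${KUBECTL}, e.g.:
#   "${KUBECTL}" config current-context   # expected to point at the AKS management cluster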


# set the SSH bastion and user that can be used to SSH into nodes
KUBE_SSH_BASTION=$(${KUBECTL} get azurecluster -o json | jq '.items[0].spec.networkSpec.apiServerLB.frontendIPs[0].publicIP.dnsName' | tr -d \"):22