Question
I'm having trouble exposing a Kubernetes cluster deployed on AKS with a public IP address. I'm using GitHub Actions to do the deployment. Below are my .tf and deployment.yml files, followed by the errors I'm facing.
main.tf
provider "azurerm" {
features {}
}
provider "azuread" {
version = "=0.7.0"
}
terraform {
backend "azurerm" {
resource_group_name = "tstate-rg"
storage_account_name = "tstateidentity11223"
container_name = "tstate"
access_key = "/qSJCUo..."
key = "terraform.tfstate"
}
}
# create resource group
resource "azurerm_resource_group" "aks" {
name = "${var.name_prefix}-rg"
location = var.location
}
}
aks-cluster.tf
resource "azurerm_kubernetes_cluster" "aks" {
name = "${var.name_prefix}-aks"
location = var.location
resource_group_name = var.resourcename
dns_prefix = "${var.name_prefix}-dns"
default_node_pool {
name = "identitynode"
node_count = 3
vm_size = "Standard_D2_v2"
os_disk_size_gb = 30
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
network_profile {
network_plugin = "kubenet"
load_balancer_sku = "Standard"
}
}
nginxlb.tf
# Initialize Helm (and install Tiller)
provider "helm" {
  # install_tiller = true
  kubernetes {
    host                   = azurerm_kubernetes_cluster.aks.kube_config.0.host
    client_certificate     = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.client_certificate)
    client_key             = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.client_key)
    cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.cluster_ca_certificate)
    load_config_file       = false
  }
}

# Add Kubernetes Stable Helm charts repo
data "helm_repository" "stable" {
  name = "stable"
  url  = "https://kubernetes-charts.storage.googleapis.com"
}

# Create Static Public IP Address to be used by Nginx Ingress
resource "azurerm_public_ip" "nginx_ingress" {
  name                = "nginx-ingress-pip"
  location            = azurerm_kubernetes_cluster.aks.location
  resource_group_name = azurerm_kubernetes_cluster.aks.node_resource_group
  allocation_method   = "Static"
  domain_name_label   = var.name_prefix
}

# Install Nginx Ingress using Helm Chart
resource "helm_release" "nginx" {
  name       = "nginx-ingress"
  repository = data.helm_repository.stable.url
  # repository = data.helm_repository.stable.metadata.0.name
  chart      = "nginx-ingress"
  # namespace = "kube-system"
  namespace  = "default"

  set {
    name  = "rbac.create"
    value = "false"
  }

  set {
    name  = "controller.service.externalTrafficPolicy"
    value = "Local"
  }

  set {
    name  = "controller.service.loadBalancerIP"
    value = azurerm_public_ip.nginx_ingress.ip_address
  }
}
And my deployment.yml
apiVersion: v1
kind: Namespace
metadata:
  name:
  namespace: default
---
apiVersion: v1
kind: Service
metadata:
  name: identity-svc
  namespace: default
  labels:
    name: identity-svc
    env: dev
    app: identity-svc
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-resource-group: MC_identity-k8s-rg_identity-k8s-aks_westeurope
    # nginx.ingress.kubernetes.io/rewrite-target: /$1
spec:
  loadBalancerIP: 13.95.67.206
  type: LoadBalancer  ## NodePort, ClusterIP, LoadBalancer --> Ingress Controller: nginx, HAProxy
  ports:
    - name: http
      port: 8000
      targetPort: 8000
      nodePort: 30036
      protocol: TCP
  selector:
    app: identity-svc
---
apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJpZGVudGl0eXNlcnZpY2UuYXp1cmVjVZWcVpYS2o4QTM3RmsvZEZZbTlrbHQiLCJlbWFpbCI6InN1YmplQHN1YmplLmNvbSIsImF1dGgiOiJ
kind: Secret
metadata:
  creationTimestamp: null
  name: acr-secret
  namespace: default
type: kubernetes.io/dockerconfigjson
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: identity-deploy
  namespace: default
  labels:
    name: identity-app
    env: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: identity-svc
  template:
    metadata:
      namespace: default
      labels:
        app: identity-svc
    spec:
      # backoffLimit: 1
      imagePullSecrets:
        - name: acr-secret
      containers:
        - name: identitysvc
          image: identitysvc.azurecr.io/identitysvc:${{ github.run_id }}
          env:
            - name: SECRET_KEY
              value: ${SECRET_KEY}
            - name: DOPPLER_TOKEN
              value: ${DOPPLER_TOKEN}
          resources:
            requests:
              cpu: 0.5
              memory: "500Mi"
            limits:
              cpu: 2
              memory: "1000Mi"
          ports:
            - containerPort: 8000
              name: http
          imagePullPolicy: Always
      restartPolicy: Always
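For context, ${{ github.run_id }} is a GitHub Actions expression (it is only expanded inside workflow files, not in files the workflow reads) and ${SECRET_KEY} / ${DOPPLER_TOKEN} are shell-style placeholders, so the manifest has to be rendered before kubectl apply. A minimal sketch of such a workflow step, assuming envsubst and a plain ${RUN_ID} placeholder for the image tag (the step and secret names are illustrative, not from my actual workflow):

# Illustrative GitHub Actions step, not the original workflow.
# Assumes deployment.yml uses ${RUN_ID} instead of ${{ github.run_id }}.
- name: Deploy to AKS
  env:
    RUN_ID: ${{ github.run_id }}
    SECRET_KEY: ${{ secrets.SECRET_KEY }}
    DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN }}
  run: |
    # envsubst fills in ${RUN_ID}, ${SECRET_KEY} and ${DOPPLER_TOKEN},
    # then the rendered manifest is applied to the cluster.
    envsubst < deployment.yml | kubectl apply -f -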
The following are the error messages from the GitHub Actions log and from kubectl on AKS.
GitHub Actions log (this message gets repeated until timeout):
kubectl output on AKS:
kubectl describe svc
Name: nginx-ingress-controller
Namespace: default
Labels: app=nginx-ingress
app.kubernetes.io/managed-by=Helm
chart=nginx-ingress-1.41.3
component=controller
heritage=Helm
release=nginx-ingress
Annotations: meta.helm.sh/release-name: nginx-ingress
meta.helm.sh/release-namespace: default
Selector: app.kubernetes.io/component=controller,app=nginx-ingress,release=nginx-ingress
Type: LoadBalancer
IP: 10.0.153.66
IP: 13.95.67.206
Port: http 8000/TCP
TargetPort: http/TCP
NodePort: http 30933/TCP
Endpoints: 10.244.1.6:8000
Port: https 443/TCP
TargetPort: https/TCP
NodePort: https 32230/TCP
Endpoints: 10.244.1.6:443
Session Affinity: None
External Traffic Policy: Local
HealthCheck NodePort: 32755
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal EnsuringLoadBalancer 4m17s (x43 over 3h10m) service-controller Ensuring load balancer
Warning CreateOrUpdateLoadBalancer 4m16s (x43 over 3h10m) azure-cloud-provider Code="PublicIPAndLBSkuDoNotMatch" Message="Standard sku load balancer /subscriptions/e90bd4d0-3b50-4a27-a7e8-bc88cf5f5398/resourceGroups/mc_identity-k8s-rg_identity-k8s-aks_westeurope/providers/Microsoft.Network/loadBalancers/kubernetes cannot reference Basic sku publicIP /subscriptions/e90bd4d0-3b50-4a27-a7e8-bc88cf5f5398/resourceGroups/MC_identity-k8s-rg_identity-k8s-aks_westeurope/providers/Microsoft.Network/publicIPAddresses/nginx-ingress-pip." Details=[]
kubectl logs
I1108 12:52:52.862797 7 flags.go:205] Watching for Ingress class: nginx
W1108 12:52:52.863034 7 flags.go:250] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)
W1108 12:52:52.863078 7 client_config.go:552] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
I1108 12:52:52.863272 7 main.go:231] Creating API client for https://10.0.0.1:443
-------------------------------------------------------------------------------
NGINX Ingress controller
Release: v0.34.1
Build: v20200715-ingress-nginx-2.11.0-8-gda5fa45e2
Repository: https://github.com/kubernetes/ingress-nginx
nginx version: nginx/1.19.1
-------------------------------------------------------------------------------
I1108 12:52:52.892455 7 main.go:275] Running in Kubernetes cluster version v1.17 (v1.17.13) - git (clean) commit 30d651da517185653e34e7ab99a792be6a3d9495 - platform linux/amd64
I1108 12:52:52.897887 7 main.go:87] Validated default/nginx-ingress-default-backend as the default backend.
I1108 12:52:53.229870 7 main.go:105] SSL fake certificate created /etc/ingress-controller/ssl/default-fake-certificate.pem
W1108 12:52:53.252657 7 store.go:659] Unexpected error reading configuration configmap: configmaps "nginx-ingress-controller" not found
I1108 12:52:53.268067 7 nginx.go:263] Starting NGINX Ingress controller
I1108 12:52:54.468656 7 leaderelection.go:242] attempting to acquire leader lease default/ingress-controller-leader-nginx...
I1108 12:52:54.468691 7 nginx.go:307] Starting NGINX process
W1108 12:52:54.469222 7 controller.go:395] Service "default/nginx-ingress-default-backend" does not have any active Endpoint
I1108 12:52:54.469249 7 controller.go:141] Configuration changes detected, backend reload required.
I1108 12:52:54.473464 7 status.go:86] new leader elected: nginx-ingress-controller-6b45fcd8ff-7mbx4
I1108 12:52:54.543113 7 controller.go:157] Backend successfully reloaded.
I1108 12:52:54.543152 7 controller.go:166] Initial sync, sleeping for 1 second.
W1108 12:52:58.251867 7 controller.go:395] Service "default/nginx-ingress-default-backend" does not have any active Endpoint
I1108 12:53:38.008002 7 leaderelection.go:252] successfully acquired lease default/ingress-controller-leader-nginx
I1108 12:53:38.008203 7 status.go:86] new leader elected: nginx-ingress-controller-6b45fcd8ff-njgjs
Help me understand: what am I missing here? This whole process is just me trying to deploy a simple Python service on a public IP address. I'm trying to expose the service on a public IP; at the moment it doesn't matter with which method, whether nginx or any other load-balancing service.
Also, before I implemented the nginx ingress in the Terraform files I could see identity-svc running when I ran kubectl get services, but now I can't even see that service, only the nginx ingress controller. I really appreciate any help.
Edit: After adding sku = "Standard" to the public IP creation, as @mynko mentioned, the workflow runs successfully. Now when I check, I get the following:
admin@Azure:~$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
identity-svc LoadBalancer 10.0.188.32 20.56.242.212 8000:30036/TCP 22m
kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 7h28m
nginx-ingress-controller LoadBalancer 10.0.230.164 20.50.221.84 8000:31742/TCP,443:31675/TCP 23m
nginx-ingress-default-backend ClusterIP 10.0.229.217 <none> 8000/TCP 23m
I'm not sure why nginx-ingress-controller is listening on port 80 instead of 8000. Also, when I try to access 20.56.242.212:8000, nothing loads. And in this case, which one should be my exposed public IP? When I access 20.50.221.84 it shows default backend - 404.
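My guess is that the 404 means the controller's IP (20.50.221.84) is the public entry point, but nothing routes requests on to identity-svc yet. A minimal sketch of the Ingress resource I assume is missing (the resource name is mine, and identity-svc would then no longer need to be a LoadBalancer of its own; the networking.k8s.io/v1beta1 API matches the cluster's Kubernetes v1.17):

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: identity-ingress   # illustrative name, not from the original setup
  namespace: default
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
    - http:
        paths:
          - path: /
            backend:
              serviceName: identity-svc   # the existing service
              servicePort: 8000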
Answer 1:
Have a look at the Kubernetes service warning message:
Code="PublicIPAndLBSkuDoNotMatch"
You're using a Basic SKU public IP; change it to Standard:
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/public_ip#sku
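A minimal sketch of the change in nginxlb.tf, assuming an azurerm provider version that supports the sku argument on azurerm_public_ip (sku defaults to "Basic"; only the sku line is new, the rest is the resource from the question):

resource "azurerm_public_ip" "nginx_ingress" {
  name                = "nginx-ingress-pip"
  location            = azurerm_kubernetes_cluster.aks.location
  resource_group_name = azurerm_kubernetes_cluster.aks.node_resource_group
  allocation_method   = "Static" # Standard SKU public IPs must use static allocation
  domain_name_label   = var.name_prefix
  sku                 = "Standard" # must match the AKS load_balancer_sku
}

This lines up with the load_balancer_sku = "Standard" already set in the cluster's network_profile in aks-cluster.tf, which is exactly the mismatch the PublicIPAndLBSkuDoNotMatch error reports.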
Source: https://stackoverflow.com/questions/64740298/how-to-expose-an-azure-kubernetes-cluster-with-a-public-ip-address-using-terrafo