# Complete example — exercises the full feature set of the k8s_platform module.
terraform {
  required_version = ">= 1.7.0"

  # Remote state in S3 with DynamoDB-based state locking.
  backend "s3" {
    bucket               = "tf-state-911453050078"
    key                  = "examples/complete.tfstate"
    workspace_key_prefix = "terraform-aws-kubernetes-platform"
    dynamodb_table       = "terraform-lock"
    region               = "eu-central-1"
  }

  # Providers sorted alphabetically; order has no semantic effect.
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "~> 4.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.6"
    }
    kubectl = {
      source  = "alekc/kubectl"
      version = "~> 2.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.27"
    }
  }
}
# AWS provider pinned to the single region declared in locals.
provider "aws" {
  region = local.region
}
# Kubernetes provider: authenticates against the EKS cluster created by the
# k8s_platform module using a short-lived token minted by the AWS CLI.
provider "kubernetes" {
  host                   = module.k8s_platform.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.k8s_platform.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", module.k8s_platform.eks.cluster_name]
  }
}
# Helm provider: same EKS endpoint/CA/token configuration as the
# kubernetes provider above, nested under its `kubernetes` block.
provider "helm" {
  kubernetes {
    host                   = module.k8s_platform.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.k8s_platform.eks.cluster_certificate_authority_data)

    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      command     = "aws"
      args        = ["eks", "get-token", "--cluster-name", module.k8s_platform.eks.cluster_name]
    }
  }
}
# kubectl provider (alekc/kubectl): same cluster credentials, with retries
# enabled and local kubeconfig loading disabled so only the exec token is used.
provider "kubectl" {
  apply_retry_count      = 5
  load_config_file       = false
  host                   = module.k8s_platform.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.k8s_platform.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", module.k8s_platform.eks.cluster_name]
  }
}
locals {
  # Single region shared by the AWS provider and the kubeconfig hint output.
  region = "eu-central-1"
}
# Platform module under test: provisions the VPC, EKS cluster, Karpenter
# and optional add-ons for the "complete" example.
module "k8s_platform" {
  source = "../../"

  name = "ex-complete"

  # IAM roles granted admin access on the cluster.
  cluster_admins = {
    cicd = {
      role_name = "cicd-iac"
    }
  }

  tags = {
    Environment = "sandbox"
    GithubRepo  = "terraform-aws-kubernetes-platform"
    GithubOrg   = "tx-pts-dai"
  }

  vpc = {
    enabled = true
    cidr    = "10.0.0.0/16"
    max_az  = 3
    # One entry per subnet tier; the number presumably is the subnet prefix
    # length — TODO confirm against the root module's variable docs.
    subnet_configs = [
      { public = 24 },
      { private = 24 },
      { intra = 26 },
      { database = 26 },
      { karpenter = 22 }
    ]
  }

  # Node volume sizing for Karpenter-managed nodes.
  karpenter = {
    root_volume_size = "8Gi"
    data_volume_size = "80Gi"
  }

  enable_downscaler = true
  enable_amp        = true
}
# Cloudflare credentials stored in AWS Secrets Manager; the secret string is
# JSON containing at least "apiToken" and "accountId" keys (decoded below).
data "aws_secretsmanager_secret_version" "cloudflare" {
  secret_id = "dai/cloudflare/tamedia/apiToken"
}
# Cloudflare provider authenticated with the API token from Secrets Manager.
provider "cloudflare" {
  api_token = jsondecode(data.aws_secretsmanager_secret_version.cloudflare.secret_string)["apiToken"]
}
locals {
  # DNS zones to create, keyed by zone name; each value holds per-zone
  # settings consumed by the route53_zones module below.
  zones = {
    "kaas-example.tamedia.tech" = {
      comment = "DAI KaaS example complete"
    }
  }
}
# Manage DNS sub-domains in Cloudflare and attach them to their parent zone in Route53
# Delegates each Route53 zone to Cloudflare by handing over its name servers.
module "cloudflare" {
  source   = "../../modules/cloudflare"
  for_each = local.zones

  zone_name = module.route53_zones[each.key].route53_zone_name[each.key]
  comment   = "Managed by KAAS examples"
  # Pass the full NS record set instead of rebuilding it with range(4):
  # the number of name servers Route53 returns should not be hard-coded,
  # and the whole-list reference still carries the module dependency.
  name_servers = module.route53_zones[each.key].route53_zone_name_servers[each.key]
  account_id   = jsondecode(data.aws_secretsmanager_secret_version.cloudflare.secret_string)["accountId"]
}
# One instance of the upstream zones module per entry in local.zones.
module "route53_zones" {
  source  = "terraform-aws-modules/route53/aws//modules/zones"
  version = "2.11.1"

  for_each = local.zones

  zones = {
    (each.key) = {
      comment = each.value.comment
    }
  }
}
output "eks" {
  description = "eks module outputs"
  value       = module.k8s_platform.eks
}

output "vpc" {
  description = "vpc module outputs"
  value       = module.k8s_platform.network
}

# Name intentionally starts with "z" so this hint sorts last in `terraform output`.
output "zconfigure_kubectl" {
  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
  value       = "aws eks --region ${local.region} update-kubeconfig --name ${module.k8s_platform.eks.cluster_name} --alias ${module.k8s_platform.eks.cluster_name}"
}

output "karpenter" {
  description = "karpenter module outputs"
  value       = module.k8s_platform.karpenter
}