You can find the source code for this video in my GitHub repo.
Intro
In this video, we're going to go over the following sections:
Create EKS Cluster Using Terraform
Deploy App to Kubernetes and Expose It with NLB
Create AWS API Gateway Using Terraform
Integrate API Gateway with Amazon EKS
You can find the timestamps in the video description.
In the first section, let's create an EKS cluster and a VPC from scratch.
In the provider file, we declare version constraints as well as a couple of variables, such as the EKS cluster name and version.
terraform/0-provider.tf
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
  }
}

provider "aws" {
  region = "us-east-1"
}

variable "cluster_name" {
  default = "demo"
}

variable "cluster_version" {
  default = "1.22"
}
Then we need a VPC.
terraform/1-vpc.tf
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"

  tags = {
    Name = "main"
  }
}
Internet gateway.
terraform/2-igw.tf
resource "aws_internet_gateway" "igw" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "igw"
  }
}
Four subnets: two private and two public. The kubernetes.io role tags let Kubernetes discover which subnets to use for internal and internet-facing load balancers.
terraform/3-subnets.tf
resource "aws_subnet" "private-us-east-1a" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.0.0/19"
  availability_zone = "us-east-1a"

  tags = {
    "Name"                                      = "private-us-east-1a"
    "kubernetes.io/role/internal-elb"           = "1"
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  }
}

resource "aws_subnet" "private-us-east-1b" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.32.0/19"
  availability_zone = "us-east-1b"

  tags = {
    "Name"                                      = "private-us-east-1b"
    "kubernetes.io/role/internal-elb"           = "1"
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  }
}

resource "aws_subnet" "public-us-east-1a" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.64.0/19"
  availability_zone       = "us-east-1a"
  map_public_ip_on_launch = true

  tags = {
    "Name"                                      = "public-us-east-1a"
    "kubernetes.io/role/elb"                    = "1"
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  }
}

resource "aws_subnet" "public-us-east-1b" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.96.0/19"
  availability_zone       = "us-east-1b"
  map_public_ip_on_launch = true

  tags = {
    "Name"                                      = "public-us-east-1b"
    "kubernetes.io/role/elb"                    = "1"
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  }
}
A NAT gateway to provide internet access for the private subnets.
terraform/4-nat.tf
resource "aws_eip" "nat" {
  vpc = true

  tags = {
    Name = "nat"
  }
}

resource "aws_nat_gateway" "nat" {
  allocation_id = aws_eip.nat.id
  subnet_id     = aws_subnet.public-us-east-1a.id

  tags = {
    Name = "nat"
  }

  depends_on = [aws_internet_gateway.igw]
}
Routes.
terraform/5-routes.tf
resource "aws_route_table" "private" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.nat.id
  }

  tags = {
    Name = "private"
  }
}

resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.igw.id
  }

  tags = {
    Name = "public"
  }
}

resource "aws_route_table_association" "private-us-east-1a" {
  subnet_id      = aws_subnet.private-us-east-1a.id
  route_table_id = aws_route_table.private.id
}

resource "aws_route_table_association" "private-us-east-1b" {
  subnet_id      = aws_subnet.private-us-east-1b.id
  route_table_id = aws_route_table.private.id
}

resource "aws_route_table_association" "public-us-east-1a" {
  subnet_id      = aws_subnet.public-us-east-1a.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "public-us-east-1b" {
  subnet_id      = aws_subnet.public-us-east-1b.id
  route_table_id = aws_route_table.public.id
}
Then the EKS cluster itself.
terraform/6-eks.tf
resource "aws_iam_role" "eks-cluster" {
  name = "eks-cluster-${var.cluster_name}"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "eks.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}
resource "aws_iam_role_policy_attachment" "amazon-eks-cluster-policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks-cluster.name
}
resource "aws_eks_cluster" "cluster" {
name = var.cluster_name
version = var.cluster_version
role_arn = aws_iam_role.eks-cluster.arn
vpc_config {
subnet_ids = [
aws_subnet.private-us-east-1a.id ,
aws_subnet.private-us-east-1b.id ,
aws_subnet.public-us-east-1a.id ,
aws_subnet.public-us-east-1b.id
]
}
depends_on = [ aws_iam_role_policy_attachment.amazon-eks-cluster-policy ]
}
Single node group with one node.
terraform/7-nodes.tf
resource "aws_iam_role" "nodes" {
  name = "eks-node-group-nodes"

  assume_role_policy = jsonencode({
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Principal = {
        Service = "ec2.amazonaws.com"
      }
    }]
    Version = "2012-10-17"
  })
}

resource "aws_iam_role_policy_attachment" "amazon-eks-worker-node-policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.nodes.name
}

resource "aws_iam_role_policy_attachment" "amazon-eks-cni-policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.nodes.name
}

resource "aws_iam_role_policy_attachment" "amazon-ec2-container-registry-read-only" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.nodes.name
}

resource "aws_eks_node_group" "private-nodes" {
  cluster_name    = aws_eks_cluster.cluster.name
  version         = var.cluster_version
  node_group_name = "private-nodes"
  node_role_arn   = aws_iam_role.nodes.arn

  subnet_ids = [
    aws_subnet.private-us-east-1a.id,
    aws_subnet.private-us-east-1b.id
  ]

  capacity_type  = "ON_DEMAND"
  instance_types = ["t3.small"]

  scaling_config {
    desired_size = 1
    max_size     = 5
    min_size     = 0
  }

  update_config {
    max_unavailable = 1
  }

  labels = {
    role = "general"
  }

  depends_on = [
    aws_iam_role_policy_attachment.amazon-eks-worker-node-policy,
    aws_iam_role_policy_attachment.amazon-eks-cni-policy,
    aws_iam_role_policy_attachment.amazon-ec2-container-registry-read-only,
  ]

  # Allow external changes without Terraform plan difference
  lifecycle {
    ignore_changes = [scaling_config[0].desired_size]
  }
}
Now let's create the VPC and the cluster with the terraform apply command.
terraform init
terraform apply
As always, when you use Terraform to create a cluster, you need to update your Kubernetes context manually.
aws eks update-kubeconfig --name demo --region us-east-1
Then run a quick check by listing the services in the default namespace.
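For example (assuming kubectl now points at the new cluster):
kubectl get svc
You should see the built-in kubernetes ClusterIP service in the default namespace.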
Deploy App to Kubernetes and Expose It with NLB
Next, we need to deploy an app to Kubernetes and expose it with a Network Load Balancer.
You don't need to deploy the AWS Load Balancer Controller for that; you can just use service annotations.
k8s/echo-server.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: staging
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: echoserver
  namespace: staging
spec:
  selector:
    matchLabels:
      app: echoserver
  replicas: 1
  template:
    metadata:
      labels:
        app: echoserver
    spec:
      containers:
        - image: k8s.gcr.io/e2e-test-images/echoserver:2.5
          name: echoserver
          ports:
            - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: echoserver
  namespace: staging
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
    service.beta.kubernetes.io/aws-load-balancer-internal: "true"
spec:
  ports:
    - port: 8080
      protocol: TCP
  type: LoadBalancer
  selector:
    app: echoserver
Let's go ahead and apply this app.
kubectl apply -f k8s/echo-server.yaml
List the services in the staging namespace; you should find the hostname of the load balancer. Take note of it or copy it.
kubectl get svc -n staging
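To grab just the hostname, you can also use a JSONPath query (a small convenience, assuming the NLB has finished provisioning):
kubectl get svc echoserver -n staging -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'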
Create AWS API Gateway Using Terraform
The next step is to create an AWS API Gateway using Terraform. We're going to use API Gateway version 2 (HTTP API).
terraform/8-api-gateway.tf
resource "aws_apigatewayv2_api" "main" {
  name          = "main"
  protocol_type = "HTTP"
}

resource "aws_apigatewayv2_stage" "dev" {
  api_id      = aws_apigatewayv2_api.main.id
  name        = "dev"
  auto_deploy = true
}
Go back to the terminal and apply it.
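terraform apply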
Integrate API Gateway with Amazon EKS
The final step is to integrate the API Gateway with Amazon EKS. For the integration, we need to create an AWS VPC link (PrivateLink), but before that, we need to create a security group.
terraform/9-integration.tf
resource "aws_security_group" "vpc_link" {
  name   = "vpc-link"
  vpc_id = aws_vpc.main.id

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_apigatewayv2_vpc_link" "eks" {
  name               = "eks"
  security_group_ids = [aws_security_group.vpc_link.id]
  subnet_ids = [
    aws_subnet.private-us-east-1a.id,
    aws_subnet.private-us-east-1b.id
  ]
}

resource "aws_apigatewayv2_integration" "eks" {
  api_id = aws_apigatewayv2_api.main.id

  # Replace with the listener ARN of the NLB created for your echoserver service.
  integration_uri    = "arn:aws:elasticloadbalancing:us-east-1:<acc-id>:listener/net/a852b4f6ff0be41dfa1505018b083488/e8cf16c1a71e2a37/59bf9fd068f3f993"
  integration_type   = "HTTP_PROXY"
  integration_method = "ANY"
  connection_type    = "VPC_LINK"
  connection_id      = aws_apigatewayv2_vpc_link.eks.id
}

resource "aws_apigatewayv2_route" "get_echo" {
  api_id    = aws_apigatewayv2_api.main.id
  route_key = "GET /echo"
  target    = "integrations/${aws_apigatewayv2_integration.eks.id}"
}

output "hello_base_url" {
  value = "${aws_apigatewayv2_stage.dev.invoke_url}/echo"
}
This is the last time we run Terraform.
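terraform apply
When it finishes, Terraform prints the hello_base_url output with the invoke URL for the /echo route.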
Use curl to test the integration. If everything is wired up correctly, the echoserver should respond with details of your request.
curl https://<your-gw-id>.execute-api.us-east-1.amazonaws.com/dev/echo